From 71107f4648d8f31a7bcc0aa5202ef46230df583f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Johan=20Sj=C3=B6len?=
Date: Wed, 25 Jan 2023 10:30:02 +0000
Subject: [PATCH] 8300651: Replace NULL with nullptr in share/runtime/

Reviewed-by: rehn, dholmes
---
 .../share/runtime/abstract_vm_version.cpp | 16 +-
 src/hotspot/share/runtime/arguments.cpp | 334 +++++++++---------
 src/hotspot/share/runtime/arguments.hpp | 54 +--
 src/hotspot/share/runtime/atomic.hpp | 6 +-
 src/hotspot/share/runtime/basicLock.cpp | 4 +-
 .../share/runtime/continuationFreezeThaw.cpp | 6 +-
 src/hotspot/share/runtime/deoptimization.cpp | 162 ++++-----
 src/hotspot/share/runtime/deoptimization.hpp | 4 +-
 src/hotspot/share/runtime/escapeBarrier.cpp | 20 +-
 src/hotspot/share/runtime/escapeBarrier.hpp | 6 +-
 src/hotspot/share/runtime/fieldDescriptor.cpp | 20 +-
 .../share/runtime/flags/debug_globals.hpp | 4 +-
 src/hotspot/share/runtime/flags/jvmFlag.cpp | 38 +-
 .../share/runtime/flags/jvmFlagAccess.cpp | 22 +-
 .../share/runtime/flags/jvmFlagAccess.hpp | 8 +-
 .../share/runtime/flags/jvmFlagLimit.cpp | 16 +-
 .../share/runtime/flags/jvmFlagLimit.hpp | 10 +-
 .../share/runtime/flags/jvmFlagLookup.cpp | 4 +-
 src/hotspot/share/runtime/frame.cpp | 158 ++++-----
 src/hotspot/share/runtime/frame.hpp | 20 +-
 src/hotspot/share/runtime/frame.inline.hpp | 16 +-
 src/hotspot/share/runtime/globals.hpp | 26 +-
 src/hotspot/share/runtime/handles.cpp | 20 +-
 src/hotspot/share/runtime/handles.hpp | 22 +-
 src/hotspot/share/runtime/handles.inline.hpp | 12 +-
 src/hotspot/share/runtime/handshake.cpp | 42 +--
 src/hotspot/share/runtime/icache.cpp | 6 +-
 .../share/runtime/interfaceSupport.cpp | 6 +-
 .../share/runtime/interfaceSupport.inline.hpp | 6 +-
 src/hotspot/share/runtime/java.cpp | 34 +-
 src/hotspot/share/runtime/java.hpp | 8 +-
 src/hotspot/share/runtime/javaCalls.cpp | 12 +-
 src/hotspot/share/runtime/javaCalls.hpp | 4 +-
 src/hotspot/share/runtime/javaFrameAnchor.hpp | 12 +-
 src/hotspot/share/runtime/javaThread.cpp | 178 +++++-----
 src/hotspot/share/runtime/javaThread.hpp | 32 +-
 .../share/runtime/javaThread.inline.hpp | 8 +-
 src/hotspot/share/runtime/jniHandles.cpp | 130 +++----
 src/hotspot/share/runtime/jniHandles.hpp | 8 +-
 .../share/runtime/jniHandles.inline.hpp | 22 +-
 .../share/runtime/jniPeriodicChecker.cpp | 4 +-
 .../share/runtime/jniPeriodicChecker.hpp | 4 +-
 .../share/runtime/keepStackGCProcessed.cpp | 6 +-
 src/hotspot/share/runtime/monitorChunk.cpp | 4 +-
 src/hotspot/share/runtime/mutex.cpp | 68 ++--
 src/hotspot/share/runtime/mutex.hpp | 4 +-
 src/hotspot/share/runtime/mutexLocker.cpp | 202 +++++------
 src/hotspot/share/runtime/mutexLocker.hpp | 12 +-
 src/hotspot/share/runtime/nonJavaThread.cpp | 32 +-
 src/hotspot/share/runtime/nonJavaThread.hpp | 6 +-
 src/hotspot/share/runtime/objectMonitor.cpp | 236 ++++++------
 src/hotspot/share/runtime/objectMonitor.hpp | 16 +-
 .../share/runtime/objectMonitor.inline.hpp | 12 +-
 src/hotspot/share/runtime/orderAccess.cpp | 4 +-
 src/hotspot/share/runtime/os.cpp | 162 ++++-----
 src/hotspot/share/runtime/os.hpp | 20 +-
 src/hotspot/share/runtime/os.inline.hpp | 4 +-
 src/hotspot/share/runtime/park.cpp | 18 +-
 src/hotspot/share/runtime/park.hpp | 8 +-
 src/hotspot/share/runtime/perfData.cpp | 64 ++--
 src/hotspot/share/runtime/perfData.hpp | 8 +-
 src/hotspot/share/runtime/perfData.inline.hpp | 4 +-
 src/hotspot/share/runtime/perfMemory.cpp | 26 +-
 src/hotspot/share/runtime/perfMemory.hpp | 4 +-
 src/hotspot/share/runtime/reflection.cpp | 70 ++--
 src/hotspot/share/runtime/reflectionUtils.cpp | 6 +-
 src/hotspot/share/runtime/registerMap.hpp | 4 +-
 src/hotspot/share/runtime/relocator.cpp | 16 +-
 src/hotspot/share/runtime/relocator.hpp | 6 +-
 src/hotspot/share/runtime/safepoint.cpp | 32 +-
 src/hotspot/share/runtime/serviceThread.cpp | 16 +-
 src/hotspot/share/runtime/sharedRuntime.cpp | 292 +++++++--------
 src/hotspot/share/runtime/sharedRuntime.hpp | 22 +-
 src/hotspot/share/runtime/signature.cpp | 42 +--
 src/hotspot/share/runtime/signature.hpp | 6 +-
 .../share/runtime/stackChunkFrameStream.hpp | 4 +-
 src/hotspot/share/runtime/stackValue.cpp | 28 +-
 src/hotspot/share/runtime/stackWatermark.cpp | 18 +-
 .../share/runtime/stackWatermarkSet.cpp | 22 +-
 .../runtime/stackWatermarkSet.inline.hpp | 8 +-
 src/hotspot/share/runtime/statSampler.cpp | 26 +-
 src/hotspot/share/runtime/statSampler.hpp | 4 +-
 .../share/runtime/stubCodeGenerator.cpp | 8 +-
 .../share/runtime/stubCodeGenerator.hpp | 10 +-
 src/hotspot/share/runtime/stubRoutines.cpp | 214 +++++------
 src/hotspot/share/runtime/stubRoutines.hpp | 10 +-
 src/hotspot/share/runtime/synchronizer.cpp | 92 ++---
 src/hotspot/share/runtime/synchronizer.hpp | 4 +-
 src/hotspot/share/runtime/task.cpp | 8 +-
 src/hotspot/share/runtime/thread.cpp | 80 ++---
 src/hotspot/share/runtime/thread.hpp | 24 +-
 src/hotspot/share/runtime/threadSMR.cpp | 112 +++---
 src/hotspot/share/runtime/threadSMR.hpp | 24 +-
 .../share/runtime/threadSMR.inline.hpp | 4 +-
 src/hotspot/share/runtime/threads.cpp | 74 ++--
 src/hotspot/share/runtime/timerTrace.cpp | 14 +-
 src/hotspot/share/runtime/timerTrace.hpp | 4 +-
 src/hotspot/share/runtime/unhandledOops.cpp | 4 +-
 src/hotspot/share/runtime/unhandledOops.hpp | 4 +-
 src/hotspot/share/runtime/vframe.cpp | 80 ++---
 src/hotspot/share/runtime/vframe.hpp | 8 +-
 src/hotspot/share/runtime/vframe.inline.hpp | 24 +-
 src/hotspot/share/runtime/vframeArray.cpp | 54 +--
 src/hotspot/share/runtime/vframe_hp.cpp | 52 +--
 src/hotspot/share/runtime/vframe_hp.hpp | 4 +-
 src/hotspot/share/runtime/vmOperation.hpp | 4 +-
 src/hotspot/share/runtime/vmOperations.cpp | 32 +-
 src/hotspot/share/runtime/vmOperations.hpp | 6 +-
 src/hotspot/share/runtime/vmStructs.cpp | 8 +-
 src/hotspot/share/runtime/vmStructs.hpp | 40 +--
 src/hotspot/share/runtime/vmThread.cpp | 62 ++--
 src/hotspot/share/runtime/vmThread.hpp | 4 +-
 112 files changed, 2067 insertions(+), 2067 deletions(-)

diff --git a/src/hotspot/share/runtime/abstract_vm_version.cpp b/src/hotspot/share/runtime/abstract_vm_version.cpp
index 1124507f46dc7..aff9b9e5e52a5 100644
--- a/src/hotspot/share/runtime/abstract_vm_version.cpp
+++ b/src/hotspot/share/runtime/abstract_vm_version.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -309,14 +309,14 @@ void Abstract_VM_Version::insert_features_names(char* buf, size_t buflen, const
 bool Abstract_VM_Version::print_matching_lines_from_file(const char* filename, outputStream* st, const char* keywords_to_match[]) {
   char line[500];
   FILE* fp = os::fopen(filename, "r");
-  if (fp == NULL) {
+  if (fp == nullptr) {
     return false;
   }
   st->print_cr("Virtualization information:");
-  while (fgets(line, sizeof(line), fp) != NULL) {
+  while (fgets(line, sizeof(line), fp) != nullptr) {
     int i = 0;
-    while (keywords_to_match[i] != NULL) {
+    while (keywords_to_match[i] != nullptr) {
       if (strncmp(line, keywords_to_match[i], strlen(keywords_to_match[i])) == 0) {
         st->print("%s", line);
         break;
@@ -354,8 +354,8 @@ int Abstract_VM_Version::number_of_sockets(void) {
 const char* Abstract_VM_Version::cpu_name(void) {
   assert(_initialized, "should be initialized");
   char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_TYPE_DESC_BUF_SIZE, mtTracing);
-  if (NULL == tmp) {
-    return NULL;
+  if (nullptr == tmp) {
+    return nullptr;
   }
   strncpy(tmp, _cpu_name, CPU_TYPE_DESC_BUF_SIZE);
   return tmp;
@@ -364,8 +364,8 @@ const char* Abstract_VM_Version::cpu_name(void) {
 const char* Abstract_VM_Version::cpu_description(void) {
   assert(_initialized, "should be initialized");
   char* tmp = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_DETAILED_DESC_BUF_SIZE, mtTracing);
-  if (NULL == tmp) {
-    return NULL;
+  if (nullptr == tmp) {
+    return nullptr;
   }
   strncpy(tmp, _cpu_desc, CPU_DETAILED_DESC_BUF_SIZE);
   return tmp;
diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp
index 5806750fbe10b..0323bb1397d46 100644
--- a/src/hotspot/share/runtime/arguments.cpp
+++ b/src/hotspot/share/runtime/arguments.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -72,18 +72,18 @@ #define DEFAULT_JAVA_LAUNCHER "generic" -char* Arguments::_jvm_flags_file = NULL; -char** Arguments::_jvm_flags_array = NULL; +char* Arguments::_jvm_flags_file = nullptr; +char** Arguments::_jvm_flags_array = nullptr; int Arguments::_num_jvm_flags = 0; -char** Arguments::_jvm_args_array = NULL; +char** Arguments::_jvm_args_array = nullptr; int Arguments::_num_jvm_args = 0; -char* Arguments::_java_command = NULL; -SystemProperty* Arguments::_system_properties = NULL; +char* Arguments::_java_command = nullptr; +SystemProperty* Arguments::_system_properties = nullptr; size_t Arguments::_conservative_max_heap_alignment = 0; Arguments::Mode Arguments::_mode = _mixed; bool Arguments::_java_compiler = false; bool Arguments::_xdebug_mode = false; -const char* Arguments::_java_vendor_url_bug = NULL; +const char* Arguments::_java_vendor_url_bug = nullptr; const char* Arguments::_sun_java_launcher = DEFAULT_JAVA_LAUNCHER; bool Arguments::_sun_java_launcher_is_altjvm = false; @@ -96,8 +96,8 @@ size_t Arguments::_default_SharedBaseAddress = SharedBaseAddress; bool Arguments::_enable_preview = false; -char* Arguments::SharedArchivePath = NULL; -char* Arguments::SharedDynamicArchivePath = NULL; +char* Arguments::SharedArchivePath = nullptr; +char* Arguments::SharedDynamicArchivePath = nullptr; LegacyGCLogging Arguments::_legacyGCLogging = { 0, 0 }; @@ -107,34 +107,34 @@ AgentLibraryList Arguments::_agentList; // These are not set by the JDK's built-in launchers, but they can be set by // programs that embed the JVM using JNI_CreateJavaVM. See comments around // JavaVMOption in jni.h. -abort_hook_t Arguments::_abort_hook = NULL; -exit_hook_t Arguments::_exit_hook = NULL; -vfprintf_hook_t Arguments::_vfprintf_hook = NULL; +abort_hook_t Arguments::_abort_hook = nullptr; +exit_hook_t Arguments::_exit_hook = nullptr; +vfprintf_hook_t Arguments::_vfprintf_hook = nullptr; -SystemProperty *Arguments::_sun_boot_library_path = NULL; -SystemProperty *Arguments::_java_library_path = NULL; -SystemProperty *Arguments::_java_home = NULL; -SystemProperty *Arguments::_java_class_path = NULL; -SystemProperty *Arguments::_jdk_boot_class_path_append = NULL; -SystemProperty *Arguments::_vm_info = NULL; +SystemProperty *Arguments::_sun_boot_library_path = nullptr; +SystemProperty *Arguments::_java_library_path = nullptr; +SystemProperty *Arguments::_java_home = nullptr; +SystemProperty *Arguments::_java_class_path = nullptr; +SystemProperty *Arguments::_jdk_boot_class_path_append = nullptr; +SystemProperty *Arguments::_vm_info = nullptr; -GrowableArray *Arguments::_patch_mod_prefix = NULL; -PathString *Arguments::_boot_class_path = NULL; +GrowableArray *Arguments::_patch_mod_prefix = nullptr; +PathString *Arguments::_boot_class_path = nullptr; bool Arguments::_has_jimage = false; -char* Arguments::_ext_dirs = NULL; +char* Arguments::_ext_dirs = nullptr; // True if -Xshare:auto option was specified. 
static bool xshare_auto_cmd_line = false; bool PathString::set_value(const char *value, AllocFailType alloc_failmode) { char* new_value = AllocateHeap(strlen(value)+1, mtArguments, alloc_failmode); - if (new_value == NULL) { + if (new_value == nullptr) { assert(alloc_failmode == AllocFailStrategy::RETURN_NULL, "must be"); return false; } - if (_value != NULL) { + if (_value != nullptr) { FreeHeap(_value); } _value = new_value; @@ -145,15 +145,15 @@ bool PathString::set_value(const char *value, AllocFailType alloc_failmode) { void PathString::append_value(const char *value) { char *sp; size_t len = 0; - if (value != NULL) { + if (value != nullptr) { len = strlen(value); - if (_value != NULL) { + if (_value != nullptr) { len += strlen(_value); } sp = AllocateHeap(len+2, mtArguments); - assert(sp != NULL, "Unable to allocate space for new append path value"); - if (sp != NULL) { - if (_value != NULL) { + assert(sp != nullptr, "Unable to allocate space for new append path value"); + if (sp != nullptr) { + if (_value != nullptr) { strcpy(sp, _value); strcat(sp, os::path_separator()); strcat(sp, value); @@ -167,8 +167,8 @@ void PathString::append_value(const char *value) { } PathString::PathString(const char* value) { - if (value == NULL) { - _value = NULL; + if (value == nullptr) { + _value = nullptr; } else { _value = AllocateHeap(strlen(value)+1, mtArguments); strcpy(_value, value); @@ -176,14 +176,14 @@ PathString::PathString(const char* value) { } PathString::~PathString() { - if (_value != NULL) { + if (_value != nullptr) { FreeHeap(_value); - _value = NULL; + _value = nullptr; } } ModulePatchPath::ModulePatchPath(const char* module_name, const char* path) { - assert(module_name != NULL && path != NULL, "Invalid module name or path value"); + assert(module_name != nullptr && path != nullptr, "Invalid module name or path value"); size_t len = strlen(module_name) + 1; _module_name = AllocateHeap(len, mtInternal); strncpy(_module_name, module_name, len); // copy the trailing null @@ -191,24 +191,24 @@ ModulePatchPath::ModulePatchPath(const char* module_name, const char* path) { } ModulePatchPath::~ModulePatchPath() { - if (_module_name != NULL) { + if (_module_name != nullptr) { FreeHeap(_module_name); - _module_name = NULL; + _module_name = nullptr; } - if (_path != NULL) { + if (_path != nullptr) { delete _path; - _path = NULL; + _path = nullptr; } } SystemProperty::SystemProperty(const char* key, const char* value, bool writeable, bool internal) : PathString(value) { - if (key == NULL) { - _key = NULL; + if (key == nullptr) { + _key = nullptr; } else { _key = AllocateHeap(strlen(key)+1, mtArguments); strcpy(_key, key); } - _next = NULL; + _next = nullptr; _internal = internal; _writeable = writeable; } @@ -218,15 +218,15 @@ AgentLibrary::AgentLibrary(const char* name, const char* options, bool instrument_lib) { _name = AllocateHeap(strlen(name)+1, mtArguments); strcpy(_name, name); - if (options == NULL) { - _options = NULL; + if (options == nullptr) { + _options = nullptr; } else { _options = AllocateHeap(strlen(options)+1, mtArguments); strcpy(_options, options); } _is_absolute_path = is_absolute_path; _os_lib = os_lib; - _next = NULL; + _next = nullptr; _state = agent_invalid; _is_static_lib = false; _is_instrument_lib = instrument_lib; @@ -247,9 +247,9 @@ static bool match_option(const JavaVMOption *option, const char* name, // Check if 'option' matches 'name'. No "tail" is allowed. 
static bool match_option(const JavaVMOption *option, const char* name) { - const char* tail = NULL; + const char* tail = nullptr; bool result = match_option(option, name, &tail); - if (tail != NULL && *tail == '\0') { + if (tail != nullptr && *tail == '\0') { return result; } else { return false; @@ -261,7 +261,7 @@ static bool match_option(const JavaVMOption *option, const char* name) { // the option must match exactly. static bool match_option(const JavaVMOption* option, const char** names, const char** tail, bool tail_allowed) { - for (/* empty */; *names != NULL; ++names) { + for (/* empty */; *names != nullptr; ++names) { if (match_option(option, *names, tail)) { if (**tail == '\0' || (tail_allowed && **tail == ':')) { return true; @@ -276,8 +276,8 @@ static bool _has_jfr_option = false; // is using JFR // return true on failure static bool match_jfr_option(const JavaVMOption** option) { - assert((*option)->optionString != NULL, "invariant"); - char* tail = NULL; + assert((*option)->optionString != nullptr, "invariant"); + char* tail = nullptr; if (match_option(*option, "-XX:StartFlightRecording", (const char**)&tail)) { _has_jfr_option = true; return Jfr::on_start_flight_recording_option(option, tail); @@ -323,15 +323,15 @@ bool needs_module_property_warning = false; #define ENABLE_NATIVE_ACCESS_LEN 20 void Arguments::add_init_library(const char* name, char* options) { - _libraryList.add(new AgentLibrary(name, options, false, NULL)); + _libraryList.add(new AgentLibrary(name, options, false, nullptr)); } void Arguments::add_init_agent(const char* name, char* options, bool absolute_path) { - _agentList.add(new AgentLibrary(name, options, absolute_path, NULL)); + _agentList.add(new AgentLibrary(name, options, absolute_path, nullptr)); } void Arguments::add_instrument_agent(const char* name, char* options, bool absolute_path) { - _agentList.add(new AgentLibrary(name, options, absolute_path, NULL, true)); + _agentList.add(new AgentLibrary(name, options, absolute_path, nullptr, true)); } // Late-binding agents not started via arguments @@ -395,7 +395,7 @@ void Arguments::init_system_properties() { // Set up _boot_class_path which is not a property but // relies heavily on argument processing and the jdk.boot.class.path.append // property. It is used to store the underlying boot class path. - _boot_class_path = new PathString(NULL); + _boot_class_path = new PathString(nullptr); PropertyList_add(&_system_properties, new SystemProperty("java.vm.specification.name", "Java Virtual Machine Specification", false)); @@ -407,17 +407,17 @@ void Arguments::init_system_properties() { _vm_info = new SystemProperty("java.vm.info", VM_Version::vm_info_string(), true); // Following are JVMTI agent writable properties. - // Properties values are set to NULL and they are + // Properties values are set to nullptr and they are // os specific they are initialized in os::init_system_properties_values(). - _sun_boot_library_path = new SystemProperty("sun.boot.library.path", NULL, true); - _java_library_path = new SystemProperty("java.library.path", NULL, true); - _java_home = new SystemProperty("java.home", NULL, true); + _sun_boot_library_path = new SystemProperty("sun.boot.library.path", nullptr, true); + _java_library_path = new SystemProperty("java.library.path", nullptr, true); + _java_home = new SystemProperty("java.home", nullptr, true); _java_class_path = new SystemProperty("java.class.path", "", true); // jdk.boot.class.path.append is a non-writeable, internal property. 
// It can only be set by either: // - -Xbootclasspath/a: // - AddToBootstrapClassLoaderSearch during JVMTI OnLoad phase - _jdk_boot_class_path_append = new SystemProperty("jdk.boot.class.path.append", NULL, false, true); + _jdk_boot_class_path_append = new SystemProperty("jdk.boot.class.path.append", nullptr, false, true); // Add to System Property list. PropertyList_add(&_system_properties, _sun_boot_library_path); @@ -570,7 +570,7 @@ static SpecialFlag const special_jvm_flags[] = { { "dup option", JDK_Version::jdk(9), JDK_Version::undefined(), JDK_Version::undefined() }, #endif - { NULL, JDK_Version(0), JDK_Version(0) } + { nullptr, JDK_Version(0), JDK_Version(0) } }; // Flags that are aliases for other flags. @@ -582,7 +582,7 @@ typedef struct { static AliasedFlag const aliased_jvm_flags[] = { { "DefaultMaxRAMFraction", "MaxRAMFraction" }, { "CreateMinidumpOnCrash", "CreateCoredumpOnCrash" }, - { NULL, NULL} + { nullptr, nullptr} }; // Return true if "v" is less than "other", where "other" may be "undefined". @@ -596,7 +596,7 @@ static bool version_less_than(JDK_Version v, JDK_Version other) { } static bool lookup_special_flag(const char *flag_name, SpecialFlag& flag) { - for (size_t i = 0; special_jvm_flags[i].name != NULL; i++) { + for (size_t i = 0; special_jvm_flags[i].name != nullptr; i++) { if ((strcmp(special_jvm_flags[i].name, flag_name) == 0)) { flag = special_jvm_flags[i]; return true; @@ -606,7 +606,7 @@ static bool lookup_special_flag(const char *flag_name, SpecialFlag& flag) { } bool Arguments::is_obsolete_flag(const char *flag_name, JDK_Version* version) { - assert(version != NULL, "Must provide a version buffer"); + assert(version != nullptr, "Must provide a version buffer"); SpecialFlag flag; if (lookup_special_flag(flag_name, flag)) { if (!flag.obsolete_in.is_undefined()) { @@ -617,7 +617,7 @@ bool Arguments::is_obsolete_flag(const char *flag_name, JDK_Version* version) { // this version we allow some time for the removal to happen. So if the flag // still actually exists we process it as normal, but issue an adjusted warning. 
const JVMFlag *real_flag = JVMFlag::find_declared_flag(flag_name); - if (real_flag != NULL) { + if (real_flag != nullptr) { char version_str[256]; version->to_string(version_str, sizeof(version_str)); warning("Temporarily processing option %s; support is scheduled for removal in %s", @@ -632,7 +632,7 @@ bool Arguments::is_obsolete_flag(const char *flag_name, JDK_Version* version) { } int Arguments::is_deprecated_flag(const char *flag_name, JDK_Version* version) { - assert(version != NULL, "Must provide a version buffer"); + assert(version != nullptr, "Must provide a version buffer"); SpecialFlag flag; if (lookup_special_flag(flag_name, flag)) { if (!flag.deprecated_in.is_undefined()) { @@ -649,7 +649,7 @@ int Arguments::is_deprecated_flag(const char *flag_name, JDK_Version* version) { } const char* Arguments::real_flag_name(const char *flag_name) { - for (size_t i = 0; aliased_jvm_flags[i].alias_name != NULL; i++) { + for (size_t i = 0; aliased_jvm_flags[i].alias_name != nullptr; i++) { const AliasedFlag& flag_status = aliased_jvm_flags[i]; if (strcmp(flag_status.alias_name, flag_name) == 0) { return flag_status.real_name; @@ -660,7 +660,7 @@ const char* Arguments::real_flag_name(const char *flag_name) { #ifdef ASSERT static bool lookup_special_flag(const char *flag_name, size_t skip_index) { - for (size_t i = 0; special_jvm_flags[i].name != NULL; i++) { + for (size_t i = 0; special_jvm_flags[i].name != nullptr; i++) { if ((i != skip_index) && (strcmp(special_jvm_flags[i].name, flag_name) == 0)) { return true; } @@ -689,7 +689,7 @@ static const int SPECIAL_FLAG_VALIDATION_BUILD = 25; bool Arguments::verify_special_jvm_flags(bool check_globals) { bool success = true; - for (size_t i = 0; special_jvm_flags[i].name != NULL; i++) { + for (size_t i = 0; special_jvm_flags[i].name != nullptr; i++) { const SpecialFlag& flag = special_jvm_flags[i]; if (lookup_special_flag(flag.name, i)) { warning("Duplicate special flag declaration \"%s\"", flag.name); @@ -722,7 +722,7 @@ bool Arguments::verify_special_jvm_flags(bool check_globals) { // if flag has become obsolete it should not have a "globals" flag defined anymore. if (check_globals && VM_Version::vm_build_number() >= SPECIAL_FLAG_VALIDATION_BUILD && !version_less_than(JDK_Version::current(), flag.obsolete_in)) { - if (JVMFlag::find_declared_flag(flag.name) != NULL) { + if (JVMFlag::find_declared_flag(flag.name) != nullptr) { warning("Global variable for obsolete special flag entry \"%s\" should be removed", flag.name); success = false; } @@ -737,7 +737,7 @@ bool Arguments::verify_special_jvm_flags(bool check_globals) { // if flag has become expired it should not have a "globals" flag defined anymore. if (check_globals && VM_Version::vm_build_number() >= SPECIAL_FLAG_VALIDATION_BUILD && !version_less_than(JDK_Version::current(), flag.expired_in)) { - if (JVMFlag::find_declared_flag(flag.name) != NULL) { + if (JVMFlag::find_declared_flag(flag.name) != nullptr) { warning("Global variable for expired flag entry \"%s\" should be removed", flag.name); success = false; } @@ -846,7 +846,7 @@ static bool set_numeric_flag(JVMFlag* flag, const char* value, JVMFlagOrigin ori static bool set_string_flag(JVMFlag* flag, const char* value, JVMFlagOrigin origin) { if (value[0] == '\0') { - value = NULL; + value = nullptr; } if (JVMFlagAccess::set_ccstr(flag, &value, origin) != JVMFlag::SUCCESS) return false; // Contract: JVMFlag always returns a pointer that needs freeing. 
@@ -857,10 +857,10 @@ static bool set_string_flag(JVMFlag* flag, const char* value, JVMFlagOrigin orig static bool append_to_string_flag(JVMFlag* flag, const char* new_value, JVMFlagOrigin origin) { const char* old_value = ""; if (JVMFlagAccess::get_ccstr(flag, &old_value) != JVMFlag::SUCCESS) return false; - size_t old_len = old_value != NULL ? strlen(old_value) : 0; + size_t old_len = old_value != nullptr ? strlen(old_value) : 0; size_t new_len = strlen(new_value); const char* value; - char* free_this_too = NULL; + char* free_this_too = nullptr; if (old_len == 0) { value = new_value; } else if (new_len == 0) { @@ -894,7 +894,7 @@ const char* Arguments::handle_aliases_and_deprecation(const char* arg) { } // Note if we're not considered obsolete then we can't be expired either // as obsoletion must come first. - return NULL; + return nullptr; } case 0: return real_name; @@ -912,7 +912,7 @@ const char* Arguments::handle_aliases_and_deprecation(const char* arg) { } } ShouldNotReachHere(); - return NULL; + return nullptr; } #define BUFLEN 255 @@ -921,7 +921,7 @@ JVMFlag* Arguments::find_jvm_flag(const char* name, size_t name_length) { char name_copied[BUFLEN+1]; if (name[name_length] != 0) { if (name_length > BUFLEN) { - return NULL; + return nullptr; } else { strncpy(name_copied, name, name_length); name_copied[name_length] = '\0'; @@ -930,8 +930,8 @@ JVMFlag* Arguments::find_jvm_flag(const char* name, size_t name_length) { } const char* real_name = Arguments::handle_aliases_and_deprecation(name); - if (real_name == NULL) { - return NULL; + if (real_name == nullptr) { + return nullptr; } JVMFlag* flag = JVMFlag::find_flag(real_name); return flag; @@ -963,7 +963,7 @@ bool Arguments::parse_argument(const char* arg, JVMFlagOrigin origin) { } JVMFlag* flag = find_jvm_flag(name, name_len); - if (flag == NULL) { + if (flag == nullptr) { return false; } @@ -1000,16 +1000,16 @@ bool Arguments::parse_argument(const char* arg, JVMFlagOrigin origin) { } void Arguments::add_string(char*** bldarray, int* count, const char* arg) { - assert(bldarray != NULL, "illegal argument"); + assert(bldarray != nullptr, "illegal argument"); - if (arg == NULL) { + if (arg == nullptr) { return; } int new_count = *count + 1; // expand the array and add arg to the last element - if (*bldarray == NULL) { + if (*bldarray == nullptr) { *bldarray = NEW_C_HEAP_ARRAY(char*, new_count, mtArguments); } else { *bldarray = REALLOC_C_HEAP_ARRAY(char*, *bldarray, new_count, mtArguments); @@ -1029,18 +1029,18 @@ void Arguments::build_jvm_flags(const char* arg) { // utility function to return a string that concatenates all // strings in a given char** array const char* Arguments::build_resource_string(char** args, int count) { - if (args == NULL || count == 0) { - return NULL; + if (args == nullptr || count == 0) { + return nullptr; } size_t length = 0; for (int i = 0; i < count; i++) { - length += strlen(args[i]) + 1; // add 1 for a space or NULL terminating character + length += strlen(args[i]) + 1; // add 1 for a space or null terminating character } char* s = NEW_RESOURCE_ARRAY(char, length); char* dst = s; for (int j = 0; j < count; j++) { - size_t offset = strlen(args[j]) + 1; // add 1 for a space or NULL terminating character - jio_snprintf(dst, length, "%s ", args[j]); // jio_snprintf will replace the last space character with NULL character + size_t offset = strlen(args[j]) + 1; // add 1 for a space or null terminating character + jio_snprintf(dst, length, "%s ", args[j]); // jio_snprintf will replace the last space character 
with null character dst += offset; length -= offset; } @@ -1058,7 +1058,7 @@ void Arguments::print_on(outputStream* st) { st->cr(); } st->print_cr("java_command: %s", java_command() ? java_command() : ""); - if (_java_class_path != NULL) { + if (_java_class_path != nullptr) { char* path = _java_class_path->value(); size_t len = strlen(path); st->print("java_class_path (initial): "); @@ -1087,7 +1087,7 @@ void Arguments::print_summary_on(outputStream* st) { print_jvm_args_on(st); } // this is the classfile and any arguments to the java program - if (java_command() != NULL) { + if (java_command() != nullptr) { st->print("%s", java_command()); } st->cr(); @@ -1124,7 +1124,7 @@ bool Arguments::process_argument(const char* arg, size_t arg_len; const char* equal_sign = strchr(argname, '='); - if (equal_sign == NULL) { + if (equal_sign == nullptr) { arg_len = strlen(argname); } else { arg_len = equal_sign - argname; @@ -1146,7 +1146,7 @@ bool Arguments::process_argument(const char* arg, // For locked flags, report a custom error message if available. // Otherwise, report the standard unrecognized VM option. const JVMFlag* found_flag = JVMFlag::find_declared_flag((const char*)argname, arg_len); - if (found_flag != NULL) { + if (found_flag != nullptr) { char locked_message_buf[BUFLEN]; JVMFlag::MsgType msg_type = found_flag->get_locked_message(locked_message_buf, BUFLEN); if (strlen(locked_message_buf) == 0) { @@ -1177,7 +1177,7 @@ bool Arguments::process_argument(const char* arg, jio_fprintf(defaultStream::error_stream(), "Unrecognized VM option '%s'\n", argname); JVMFlag* fuzzy_matched = JVMFlag::fuzzy_match((const char*)argname, arg_len, true); - if (fuzzy_matched != NULL) { + if (fuzzy_matched != nullptr) { jio_fprintf(defaultStream::error_stream(), "Did you mean '%s%s%s'? ", (fuzzy_matched->is_bool()) ? "(+/-)" : "", @@ -1192,7 +1192,7 @@ bool Arguments::process_argument(const char* arg, bool Arguments::process_settings_file(const char* file_name, bool should_exist, jboolean ignore_unrecognized) { FILE* stream = os::fopen(file_name, "rb"); - if (stream == NULL) { + if (stream == nullptr) { if (should_exist) { jio_fprintf(defaultStream::error_stream(), "Could not open settings file %s\n", file_name); @@ -1266,7 +1266,7 @@ bool Arguments::add_property(const char* prop, PropertyWriteable writeable, Prop const char* key; const char* value = ""; - if (eq == NULL) { + if (eq == nullptr) { // property doesn't have a value, thus use passed string key = prop; } else { @@ -1310,7 +1310,7 @@ bool Arguments::add_property(const char* prop, PropertyWriteable writeable, Prop if (strcmp(key, "sun.java.command") == 0) { char *old_java_command = _java_command; _java_command = os::strdup_check_oom(value, mtArguments); - if (old_java_command != NULL) { + if (old_java_command != nullptr) { os::free(old_java_command); } } else if (strcmp(key, "java.vendor.url.bug") == 0) { @@ -1323,7 +1323,7 @@ bool Arguments::add_property(const char* prop, PropertyWriteable writeable, Prop // save it in _java_vendor_url_bug, so JVM fatal error handler can access // its value without going through the property list or making a Java call. 
_java_vendor_url_bug = os::strdup_check_oom(value, mtArguments); - if (old_java_vendor_url_bug != NULL) { + if (old_java_vendor_url_bug != nullptr) { os::free((void *)old_java_vendor_url_bug); } } @@ -1355,7 +1355,7 @@ void Arguments::check_unsupported_dumping_properties() { assert(ARRAY_SIZE(unsupported_properties) == ARRAY_SIZE(unsupported_options), "must be"); // If a vm option is found in the unsupported_options array, vm will exit with an error message. SystemProperty* sp = system_properties(); - while (sp != NULL) { + while (sp != nullptr) { for (uint i = 0; i < ARRAY_SIZE(unsupported_properties); i++) { if (strcmp(sp->key(), unsupported_properties[i]) == 0) { vm_exit_during_initialization( @@ -1374,7 +1374,7 @@ void Arguments::check_unsupported_dumping_properties() { bool Arguments::check_unsupported_cds_runtime_properties() { assert(UseSharedSpaces, "this function is only used with -Xshare:{on,auto}"); assert(ARRAY_SIZE(unsupported_properties) == ARRAY_SIZE(unsupported_options), "must be"); - if (ArchiveClassesAtExit != NULL) { + if (ArchiveClassesAtExit != nullptr) { // dynamic dumping, just return false for now. // check_unsupported_dumping_properties() will be called later to check the same set of // properties, and will exit the VM with the correct error message if the unsupported properties @@ -1382,7 +1382,7 @@ bool Arguments::check_unsupported_cds_runtime_properties() { return false; } for (uint i = 0; i < ARRAY_SIZE(unsupported_properties); i++) { - if (get_property(unsupported_properties[i]) != NULL) { + if (get_property(unsupported_properties[i]) != nullptr) { if (RequireSharedSpaces) { warning("CDS is disabled when the %s option is specified.", unsupported_options[i]); } else { @@ -1475,9 +1475,9 @@ void set_object_alignment() { size_t Arguments::max_heap_for_compressed_oops() { // Avoid sign flip. assert(OopEncodingHeapMax > (uint64_t)os::vm_page_size(), "Unusual page size"); - // We need to fit both the NULL page and the heap into the memory budget, while + // We need to fit both the null page and the heap into the memory budget, while // keeping alignment constraints of the heap. To guarantee the latter, as the - // NULL page is located before the heap, we pad the NULL page to the conservative + // null page is located before the heap, we pad the null page to the conservative // maximum alignment that the GC may ever impose upon the heap. size_t displacement_due_to_null_page = align_up((size_t)os::vm_page_size(), _conservative_max_heap_alignment); @@ -1908,7 +1908,7 @@ void Arguments::process_java_launcher_argument(const char* launcher, void* extra } bool Arguments::created_by_java_launcher() { - assert(_sun_java_launcher != NULL, "property must have value"); + assert(_sun_java_launcher != nullptr, "property must have value"); return strcmp(DEFAULT_JAVA_LAUNCHER, _sun_java_launcher) != 0; } @@ -1999,7 +1999,7 @@ bool Arguments::is_bad_option(const JavaVMOption* option, jboolean ignore, if (ignore) return false; const char* spacer = " "; - if (option_type == NULL) { + if (option_type == nullptr) { option_type = ++spacer; // Set both to the empty string. 
} @@ -2157,7 +2157,7 @@ bool valid_jdwp_agent(char *name, bool is_path) { size_t _len_jdwp, _len_prefix; if (is_path) { - if ((_name = strrchr(name, (int) *os::file_separator())) == NULL) { + if ((_name = strrchr(name, (int) *os::file_separator())) == nullptr) { return false; } @@ -2194,17 +2194,17 @@ bool valid_jdwp_agent(char *name, bool is_path) { int Arguments::process_patch_mod_option(const char* patch_mod_tail, bool* patch_mod_javabase) { // --patch-module==()* - assert(patch_mod_tail != NULL, "Unexpected NULL patch-module value"); + assert(patch_mod_tail != nullptr, "Unexpected null patch-module value"); // Find the equal sign between the module name and the path specification const char* module_equal = strchr(patch_mod_tail, '='); - if (module_equal == NULL) { + if (module_equal == nullptr) { jio_fprintf(defaultStream::output_stream(), "Missing '=' in --patch-module specification\n"); return JNI_ERR; } else { // Pick out the module name size_t module_len = module_equal - patch_mod_tail; char* module_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, module_len+1, mtArguments); - if (module_name != NULL) { + if (module_name != nullptr) { memcpy(module_name, patch_mod_tail, module_len); *(module_name + module_len) = '\0'; // The path piece begins one past the module_equal sign @@ -2243,7 +2243,7 @@ jint Arguments::parse_xss(const JavaVMOption* option, const char* tail, intx* ou julong size = 0; ArgsRange errcode = parse_memory_size(tail, &size, min_size, max_size); if (errcode != arg_in_range) { - bool silent = (option == NULL); // Allow testing to silence error messages + bool silent = (option == nullptr); // Allow testing to silence error messages if (!silent) { jio_fprintf(defaultStream::error_stream(), "Invalid thread stack size: %s\n", option->optionString); @@ -2345,14 +2345,14 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m return JNI_EINVAL; // -Xrun } else if (match_option(option, "-Xrun", &tail)) { - if (tail != NULL) { + if (tail != nullptr) { const char* pos = strchr(tail, ':'); - size_t len = (pos == NULL) ? strlen(tail) : pos - tail; + size_t len = (pos == nullptr) ? strlen(tail) : pos - tail; char* name = NEW_C_HEAP_ARRAY(char, len + 1, mtArguments); jio_snprintf(name, len + 1, "%s", tail); - char *options = NULL; - if(pos != NULL) { + char *options = nullptr; + if(pos != nullptr) { size_t len2 = strlen(pos+1) + 1; // options start after ':'. Final zero must be copied. 
options = (char*)memcpy(NEW_C_HEAP_ARRAY(char, len2, mtArguments), pos+1, len2); } @@ -2410,10 +2410,10 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m // -agentlib and -agentpath } else if (match_option(option, "-agentlib:", &tail) || (is_absolute_path = match_option(option, "-agentpath:", &tail))) { - if(tail != NULL) { + if(tail != nullptr) { const char* pos = strchr(tail, '='); char* name; - if (pos == NULL) { + if (pos == nullptr) { name = os::strdup_check_oom(tail, mtArguments); } else { size_t len = pos - tail; @@ -2422,8 +2422,8 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m name[len] = '\0'; } - char *options = NULL; - if(pos != NULL) { + char *options = nullptr; + if(pos != nullptr) { options = os::strdup_check_oom(pos + 1, mtArguments); } #if !INCLUDE_JVMTI @@ -2442,7 +2442,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m "Instrumentation agents are not supported in this VM\n"); return JNI_ERR; #else - if (tail != NULL) { + if (tail != nullptr) { size_t length = strlen(tail) + 1; char *options = NEW_C_HEAP_ARRAY(char, length, mtArguments); jio_snprintf(options, length, "%s", tail); @@ -2877,7 +2877,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m } JVMFlag *jvmciFlag = JVMFlag::find_flag("EnableJVMCIProduct"); // Allow this flag if it has been unlocked. - if (jvmciFlag != NULL && jvmciFlag->is_unlocked()) { + if (jvmciFlag != nullptr && jvmciFlag->is_unlocked()) { if (!JVMCIGlobals::enable_jvmci_product_mode(origin)) { jio_fprintf(defaultStream::error_stream(), "Unable to enable JVMCI in product mode"); @@ -2936,7 +2936,7 @@ void Arguments::add_patch_mod_prefix(const char* module_name, const char* path, } // Create GrowableArray lazily, only if --patch-module has been specified - if (_patch_mod_prefix == NULL) { + if (_patch_mod_prefix == nullptr) { _patch_mod_prefix = new (mtArguments) GrowableArray(10, mtArguments); } @@ -2990,7 +2990,7 @@ jint Arguments::finalize_vm_init_args(bool patch_mod_javabase) { jio_snprintf(path, JVM_MAXPATHLEN, "%s%slib%sendorsed", Arguments::get_java_home(), fileSep, fileSep); DIR* dir = os::opendir(path); - if (dir != NULL) { + if (dir != nullptr) { jio_fprintf(defaultStream::output_stream(), "/lib/endorsed is not supported. 
Endorsed standards and standalone APIs\n" "in modular form will be supported via the concept of upgradeable modules.\n"); @@ -3000,7 +3000,7 @@ jint Arguments::finalize_vm_init_args(bool patch_mod_javabase) { jio_snprintf(path, JVM_MAXPATHLEN, "%s%slib%sext", Arguments::get_java_home(), fileSep, fileSep); dir = os::opendir(path); - if (dir != NULL) { + if (dir != nullptr) { jio_fprintf(defaultStream::output_stream(), "/lib/ext exists, extensions mechanism no longer supported; " "Use -classpath instead.\n."); @@ -3079,24 +3079,24 @@ jint Arguments::finalize_vm_init_args(bool patch_mod_javabase) { } // RecordDynamicDumpInfo is not compatible with ArchiveClassesAtExit - if (ArchiveClassesAtExit != NULL && RecordDynamicDumpInfo) { + if (ArchiveClassesAtExit != nullptr && RecordDynamicDumpInfo) { jio_fprintf(defaultStream::output_stream(), "-XX:+RecordDynamicDumpInfo cannot be used with -XX:ArchiveClassesAtExit.\n"); return JNI_ERR; } - if (ArchiveClassesAtExit == NULL && !RecordDynamicDumpInfo) { + if (ArchiveClassesAtExit == nullptr && !RecordDynamicDumpInfo) { DynamicDumpSharedSpaces = false; } else { DynamicDumpSharedSpaces = true; } if (AutoCreateSharedArchive) { - if (SharedArchiveFile == NULL) { + if (SharedArchiveFile == nullptr) { log_warning(cds)("-XX:+AutoCreateSharedArchive requires -XX:SharedArchiveFile"); return JNI_ERR; } - if (ArchiveClassesAtExit != NULL) { + if (ArchiveClassesAtExit != nullptr) { log_warning(cds)("-XX:+AutoCreateSharedArchive does not work with ArchiveClassesAtExit"); return JNI_ERR; } @@ -3139,11 +3139,11 @@ class ScopedVMInitArgs : public StackObj { ScopedVMInitArgs(const char *container_name) { _args.version = JNI_VERSION_1_2; _args.nOptions = 0; - _args.options = NULL; + _args.options = nullptr; _args.ignoreUnrecognized = false; _container_name = (char *)container_name; _is_set = false; - _vm_options_file_arg = NULL; + _vm_options_file_arg = nullptr; } // Populates the JavaVMInitArgs object represented by this @@ -3155,7 +3155,7 @@ class ScopedVMInitArgs : public StackObj { _is_set = true; JavaVMOption* options_arr = NEW_C_HEAP_ARRAY_RETURN_NULL( JavaVMOption, options->length(), mtArguments); - if (options_arr == NULL) { + if (options_arr == nullptr) { return JNI_ENOMEM; } _args.options = options_arr; @@ -3163,7 +3163,7 @@ class ScopedVMInitArgs : public StackObj { for (int i = 0; i < options->length(); i++) { options_arr[i] = options->at(i); options_arr[i].optionString = os::strdup(options_arr[i].optionString); - if (options_arr[i].optionString == NULL) { + if (options_arr[i].optionString == nullptr) { // Rely on the destructor to do cleanup. 
_args.nOptions = i; return JNI_ENOMEM; @@ -3178,21 +3178,21 @@ class ScopedVMInitArgs : public StackObj { JavaVMInitArgs* get() { return &_args; } char* container_name() { return _container_name; } bool is_set() { return _is_set; } - bool found_vm_options_file_arg() { return _vm_options_file_arg != NULL; } + bool found_vm_options_file_arg() { return _vm_options_file_arg != nullptr; } char* vm_options_file_arg() { return _vm_options_file_arg; } void set_vm_options_file_arg(const char *vm_options_file_arg) { - if (_vm_options_file_arg != NULL) { + if (_vm_options_file_arg != nullptr) { os::free(_vm_options_file_arg); } _vm_options_file_arg = os::strdup_check_oom(vm_options_file_arg); } ~ScopedVMInitArgs() { - if (_vm_options_file_arg != NULL) { + if (_vm_options_file_arg != nullptr) { os::free(_vm_options_file_arg); } - if (_args.options == NULL) return; + if (_args.options == nullptr) return; for (int i = 0; i < _args.nOptions; i++) { os::free(_args.options[i].optionString); } @@ -3204,7 +3204,7 @@ class ScopedVMInitArgs : public StackObj { jint insert(const JavaVMInitArgs* args, const JavaVMInitArgs* args_to_insert, const int vm_options_file_pos) { - assert(_args.options == NULL, "shouldn't be set yet"); + assert(_args.options == nullptr, "shouldn't be set yet"); assert(args_to_insert->nOptions != 0, "there should be args to insert"); assert(vm_options_file_pos != -1, "vm_options_file_pos should be set"); @@ -3241,11 +3241,11 @@ jint Arguments::parse_options_environment_variable(const char* name, // Don't check this environment variable if user has special privileges // (e.g. unix su command). - if (buffer == NULL || os::have_special_privileges()) { + if (buffer == nullptr || os::have_special_privileges()) { return JNI_OK; } - if ((buffer = os::strdup(buffer)) == NULL) { + if ((buffer = os::strdup(buffer)) == nullptr) { return JNI_ENOMEM; } @@ -3284,11 +3284,11 @@ jint Arguments::parse_vm_options_file(const char* file_name, ScopedVMInitArgs* v return JNI_OK; } - // '+ 1' for NULL termination even with max bytes + // '+ 1' for null termination even with max bytes size_t bytes_alloc = stbuf.st_size + 1; char *buf = NEW_C_HEAP_ARRAY_RETURN_NULL(char, bytes_alloc, mtArguments); - if (NULL == buf) { + if (nullptr == buf) { jio_fprintf(defaultStream::error_stream(), "Could not allocate read buffer for options file parse\n"); ::close(fd); @@ -3368,13 +3368,13 @@ jint Arguments::parse_options_buffer(const char* name, char* buffer, const size_ } } - // steal a white space character and set it to NULL + // steal a white space character and set it to null *wrt++ = '\0'; // We now have a complete token JavaVMOption option; option.optionString = opt_hd; - option.extraInfo = NULL; + option.extraInfo = nullptr; options.append(option); // Fill in option @@ -3411,7 +3411,7 @@ char* Arguments::get_default_shared_archive_path() { char jvm_path[JVM_MAXPATHLEN]; os::jvm_path(jvm_path, sizeof(jvm_path)); char *end = strrchr(jvm_path, *os::file_separator()); - if (end != NULL) *end = '\0'; + if (end != nullptr) *end = '\0'; size_t jvm_path_len = strlen(jvm_path); size_t file_sep_len = strlen(os::file_separator()); const size_t len = jvm_path_len + file_sep_len + 20; @@ -3423,7 +3423,7 @@ char* Arguments::get_default_shared_archive_path() { } int Arguments::num_archives(const char* archive_path) { - if (archive_path == NULL) { + if (archive_path == nullptr) { return 0; } int npaths = 1; @@ -3442,7 +3442,7 @@ void Arguments::extract_shared_archive_paths(const char* archive_path, char** top_archive_path) { char* 
begin_ptr = (char*)archive_path; char* end_ptr = strchr((char*)archive_path, os::path_separator()[0]); - if (end_ptr == NULL || end_ptr == begin_ptr) { + if (end_ptr == nullptr || end_ptr == begin_ptr) { vm_exit_during_initialization("Base archive was not specified", archive_path); } size_t len = end_ptr - begin_ptr; @@ -3456,7 +3456,7 @@ void Arguments::extract_shared_archive_paths(const char* archive_path, vm_exit_during_initialization("Top archive was not specified", archive_path); } end_ptr = strchr(begin_ptr, '\0'); - assert(end_ptr != NULL, "sanity"); + assert(end_ptr != nullptr, "sanity"); len = end_ptr - begin_ptr; cur_path = NEW_C_HEAP_ARRAY(char, len + 1, mtInternal); strncpy(cur_path, begin_ptr, len + 1); @@ -3508,7 +3508,7 @@ void Arguments::init_shared_archive_paths() { "Cannot have more than 2 archive files specified in the -XX:SharedArchiveFile option"); } if (archives == 1) { - char* base_archive_path = NULL; + char* base_archive_path = nullptr; bool success = FileMapInfo::get_base_archive_name_from_header(SharedArchiveFile, &base_archive_path); if (!success) { @@ -3526,7 +3526,7 @@ void Arguments::init_shared_archive_paths() { } no_shared_spaces("invalid archive"); } - } else if (base_archive_path == NULL) { + } else if (base_archive_path == nullptr) { // User has specified a single archive, which is a static archive. SharedArchivePath = const_cast(SharedArchiveFile); } else { @@ -3537,8 +3537,8 @@ void Arguments::init_shared_archive_paths() { } else { extract_shared_archive_paths((const char*)SharedArchiveFile, &SharedArchivePath, &SharedDynamicArchivePath); - if (SharedArchivePath == NULL) { - assert(SharedDynamicArchivePath == NULL, "must be"); + if (SharedArchivePath == nullptr) { + assert(SharedDynamicArchivePath == nullptr, "must be"); no_shared_spaces("invalid archive"); } } @@ -3751,7 +3751,7 @@ bool Arguments::handle_deprecated_print_gc_flags() { LogTarget(Error, logging) target; LogStream errstream(target); - return LogConfiguration::parse_log_arguments(_legacyGCLogging.file, gc_conf, NULL, NULL, &errstream); + return LogConfiguration::parse_log_arguments(_legacyGCLogging.file, gc_conf, nullptr, nullptr, &errstream); } else if (PrintGC || PrintGCDetails || (_legacyGCLogging.lastFlag == 1)) { LogConfiguration::configure_stdout(LogLevel::Info, !PrintGCDetails, LOG_TAGS(gc)); } @@ -3823,7 +3823,7 @@ jint Arguments::parse(const JavaVMInitArgs* initial_cmd_args) { // Parse the options in the /java.base/jdk/internal/vm/options resource, if present char *vmoptions = ClassLoader::lookup_vm_options(); - if (vmoptions != NULL) { + if (vmoptions != nullptr) { code = parse_options_buffer("vm options resource", vmoptions, strlen(vmoptions), &initial_vm_options_args); FREE_C_HEAP_ARRAY(char, vmoptions); if (code != JNI_OK) { @@ -3860,7 +3860,7 @@ jint Arguments::parse(const JavaVMInitArgs* initial_cmd_args) { } const char* flags_file = Arguments::get_jvm_flags_file(); - settings_file_specified = (flags_file != NULL); + settings_file_specified = (flags_file != nullptr); if (IgnoreUnrecognizedVMOptions) { cur_cmd_args->ignoreUnrecognized = true; @@ -3958,7 +3958,7 @@ jint Arguments::parse(const JavaVMInitArgs* initial_cmd_args) { "Shared spaces are not supported in this VM\n"); return JNI_ERR; } - if (DumpLoadedClassList != NULL) { + if (DumpLoadedClassList != nullptr) { jio_fprintf(defaultStream::error_stream(), "DumpLoadedClassList is not supported in this VM\n"); return JNI_ERR; @@ -3976,7 +3976,7 @@ jint Arguments::parse(const JavaVMInitArgs* initial_cmd_args) { const 
NMT_TrackingLevel lvl = NMTUtil::parse_tracking_level(NativeMemoryTracking); if (lvl == NMT_unknown) { jio_fprintf(defaultStream::error_stream(), - "Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL); + "Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", nullptr); return JNI_ERR; } if (PrintNMTStatistics && lvl == NMT_off) { @@ -4109,7 +4109,7 @@ jint Arguments::adjust_after_os() { int Arguments::PropertyList_count(SystemProperty* pl) { int count = 0; - while(pl != NULL) { + while(pl != nullptr) { count++; pl = pl->next(); } @@ -4119,7 +4119,7 @@ int Arguments::PropertyList_count(SystemProperty* pl) { // Return the number of readable properties. int Arguments::PropertyList_readable_count(SystemProperty* pl) { int count = 0; - while(pl != NULL) { + while(pl != nullptr) { if (pl->readable()) { count++; } @@ -4129,41 +4129,41 @@ int Arguments::PropertyList_readable_count(SystemProperty* pl) { } const char* Arguments::PropertyList_get_value(SystemProperty *pl, const char* key) { - assert(key != NULL, "just checking"); + assert(key != nullptr, "just checking"); SystemProperty* prop; - for (prop = pl; prop != NULL; prop = prop->next()) { + for (prop = pl; prop != nullptr; prop = prop->next()) { if (strcmp(key, prop->key()) == 0) return prop->value(); } - return NULL; + return nullptr; } // Return the value of the requested property provided that it is a readable property. const char* Arguments::PropertyList_get_readable_value(SystemProperty *pl, const char* key) { - assert(key != NULL, "just checking"); + assert(key != nullptr, "just checking"); SystemProperty* prop; // Return the property value if the keys match and the property is not internal or // it's the special internal property "jdk.boot.class.path.append". - for (prop = pl; prop != NULL; prop = prop->next()) { + for (prop = pl; prop != nullptr; prop = prop->next()) { if (strcmp(key, prop->key()) == 0) { if (!prop->internal()) { return prop->value(); } else if (strcmp(key, "jdk.boot.class.path.append") == 0) { return prop->value(); } else { - // Property is internal and not jdk.boot.class.path.append so return NULL. - return NULL; + // Property is internal and not jdk.boot.class.path.append so return null. + return nullptr; } } } - return NULL; + return nullptr; } void Arguments::PropertyList_add(SystemProperty** plist, SystemProperty *new_p) { SystemProperty* p = *plist; - if (p == NULL) { + if (p == nullptr) { *plist = new_p; } else { - while (p->next() != NULL) { + while (p->next() != nullptr) { p = p->next(); } p->set_next(new_p); @@ -4172,7 +4172,7 @@ void Arguments::PropertyList_add(SystemProperty** plist, SystemProperty *new_p) void Arguments::PropertyList_add(SystemProperty** plist, const char* k, const char* v, bool writeable, bool internal) { - if (plist == NULL) + if (plist == nullptr) return; SystemProperty* new_p = new SystemProperty(k, v, writeable, internal); @@ -4187,13 +4187,13 @@ void Arguments::PropertyList_add(SystemProperty *element) { void Arguments::PropertyList_unique_add(SystemProperty** plist, const char* k, const char* v, PropertyAppendable append, PropertyWriteable writeable, PropertyInternal internal) { - if (plist == NULL) + if (plist == nullptr) return; // If property key exists and is writeable, then update with new value. // Trying to update a non-writeable property is silently ignored. 
SystemProperty* prop; - for (prop = *plist; prop != NULL; prop = prop->next()) { + for (prop = *plist; prop != nullptr; prop = prop->next()) { if (strcmp(k, prop->key()) == 0) { if (append == AppendProperty) { prop->append_writeable_value(v); @@ -4212,7 +4212,7 @@ void Arguments::PropertyList_unique_add(SystemProperty** plist, const char* k, c // the destination buffer pointed by buf. Otherwise, returns false. // Notes: // 1. If the length (buflen) of the destination buffer excluding the -// NULL terminator character is not long enough for holding the expanded +// null terminator character is not long enough for holding the expanded // pid characters, it also returns false instead of returning the partially // expanded one. // 2. The passed in "buflen" should be large enough to hold the null terminator. diff --git a/src/hotspot/share/runtime/arguments.hpp b/src/hotspot/share/runtime/arguments.hpp index cbf9dc3374546..46ac86c26141d 100644 --- a/src/hotspot/share/runtime/arguments.hpp +++ b/src/hotspot/share/runtime/arguments.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -55,7 +55,7 @@ struct SpecialFlag { }; struct LegacyGCLogging { - const char* file; // NULL -> stdout + const char* file; // null -> stdout int lastFlag; // 0 not set; 1 -> -verbose:gc; 2 -> -Xloggc }; @@ -113,7 +113,7 @@ class SystemProperty : public PathString { bool readable() const { return !_internal || (strcmp(_key, "jdk.boot.class.path.append") == 0 && - value() != NULL); + value() != nullptr); } // A system property should only have its value set @@ -143,7 +143,7 @@ class SystemProperty : public PathString { class AgentLibrary : public CHeapObj { friend class AgentLibraryList; public: - // Is this library valid or not. Don't rely on os_lib == NULL as statically + // Is this library valid or not. 
Don't rely on os_lib == nullptr as statically // linked lib could have handle of RTLD_DEFAULT which == 0 on some platforms enum AgentState { agent_invalid = 0, @@ -185,7 +185,7 @@ class AgentLibraryList { AgentLibrary* _first; AgentLibrary* _last; public: - bool is_empty() const { return _first == NULL; } + bool is_empty() const { return _first == nullptr; } AgentLibrary* first() const { return _first; } // add to the end of the list @@ -196,23 +196,23 @@ class AgentLibraryList { _last->_next = lib; _last = lib; } - lib->_next = NULL; + lib->_next = nullptr; } // search for and remove a library known to be in the list void remove(AgentLibrary* lib) { AgentLibrary* curr; - AgentLibrary* prev = NULL; - for (curr = first(); curr != NULL; prev = curr, curr = curr->next()) { + AgentLibrary* prev = nullptr; + for (curr = first(); curr != nullptr; prev = curr, curr = curr->next()) { if (curr == lib) { break; } } - assert(curr != NULL, "always should be found"); + assert(curr != nullptr, "always should be found"); - if (curr != NULL) { + if (curr != nullptr) { // it was found, by-pass this library - if (prev == NULL) { + if (prev == nullptr) { _first = curr->_next; } else { prev->_next = curr->_next; @@ -220,13 +220,13 @@ class AgentLibraryList { if (curr == _last) { _last = prev; } - curr->_next = NULL; + curr->_next = nullptr; } } AgentLibraryList() { - _first = NULL; - _last = NULL; + _first = nullptr; + _last = nullptr; } }; @@ -436,7 +436,7 @@ class Arguments : AllStatic { static bool is_bad_option(const JavaVMOption* option, jboolean ignore, const char* option_type); static bool is_bad_option(const JavaVMOption* option, jboolean ignore) { - return is_bad_option(option, ignore, NULL); + return is_bad_option(option, ignore, nullptr); } static void describe_range_error(ArgsRange errcode); @@ -467,7 +467,7 @@ class Arguments : AllStatic { static JVMFlag* find_jvm_flag(const char* name, size_t name_length); // Return the "real" name for option arg if arg is an alias, and print a warning if arg is deprecated. - // Return NULL if the arg has expired. + // Return nullptr if the arg has expired. 
static const char* handle_aliases_and_deprecation(const char* arg); static char* SharedArchivePath; @@ -520,7 +520,7 @@ class Arguments : AllStatic { // convenient methods to get and set jvm_flags_file static const char* get_jvm_flags_file() { return _jvm_flags_file; } static void set_jvm_flags_file(const char *value) { - if (_jvm_flags_file != NULL) { + if (_jvm_flags_file != nullptr) { os::free(_jvm_flags_file); } _jvm_flags_file = os::strdup_check_oom(value); @@ -603,7 +603,7 @@ class Arguments : AllStatic { static void add_patch_mod_prefix(const char *module_name, const char *path, bool* patch_mod_javabase); static void set_boot_class_path(const char *value, bool has_jimage) { // During start up, set by os::set_boot_path() - assert(get_boot_class_path() == NULL, "Boot class path previously set"); + assert(get_boot_class_path() == nullptr, "Boot class path previously set"); _boot_class_path->set_value(value); _has_jimage = has_jimage; } @@ -621,7 +621,7 @@ class Arguments : AllStatic { static char* get_appclasspath() { return _java_class_path->value(); } static void fix_appclasspath(); - static char* get_default_shared_archive_path() NOT_CDS_RETURN_(NULL); + static char* get_default_shared_archive_path() NOT_CDS_RETURN_(nullptr); static void init_shared_archive_paths() NOT_CDS_RETURN; // Operation modi @@ -676,15 +676,15 @@ do { \ } \ } while(0) -// similar to UNSUPPORTED_OPTION but sets flag to NULL -#define UNSUPPORTED_OPTION_NULL(opt) \ -do { \ - if (opt) { \ - if (FLAG_IS_CMDLINE(opt)) { \ +// similar to UNSUPPORTED_OPTION but sets flag to nullptr +#define UNSUPPORTED_OPTION_NULL(opt) \ +do { \ + if (opt) { \ + if (FLAG_IS_CMDLINE(opt)) { \ warning("-XX flag " #opt " not supported in this VM"); \ - } \ - FLAG_SET_DEFAULT(opt, NULL); \ - } \ + } \ + FLAG_SET_DEFAULT(opt, nullptr); \ + } \ } while(0) // Initialize options not supported in this release, with a warning diff --git a/src/hotspot/share/runtime/atomic.hpp b/src/hotspot/share/runtime/atomic.hpp index c730d985645d9..f9cafaf7be48b 100644 --- a/src/hotspot/share/runtime/atomic.hpp +++ b/src/hotspot/share/runtime/atomic.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -151,7 +151,7 @@ class Atomic : AllStatic { T exchange_value, atomic_memory_order order = memory_order_conservative); - // Performs atomic compare of *dest and NULL, and replaces *dest + // Performs atomic compare of *dest and nullptr, and replaces *dest // with exchange_value if the comparison succeeded. Returns true if // the comparison succeeded and the exchange occurred. This is // often used as part of lazy initialization, as a lock-free @@ -754,7 +754,7 @@ inline bool Atomic::replace_if_null(D* volatile* dest, T* value, // Presently using a trivial implementation in terms of cmpxchg. // Consider adding platform support, to permit the use of compiler // intrinsics like gcc's __sync_bool_compare_and_swap. 
- D* expected_null = NULL; + D* expected_null = nullptr; return expected_null == cmpxchg(dest, expected_null, value, order); } diff --git a/src/hotspot/share/runtime/basicLock.cpp b/src/hotspot/share/runtime/basicLock.cpp index 623c3d1f7e8eb..40ba712bf6cee 100644 --- a/src/hotspot/share/runtime/basicLock.cpp +++ b/src/hotspot/share/runtime/basicLock.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -32,7 +32,7 @@ void BasicLock::print_on(outputStream* st, oop owner) const { markWord mark_word = displaced_header(); if (mark_word.value() != 0) { // Print monitor info if there's an owning oop and it refers to this BasicLock. - bool print_monitor_info = (owner != NULL) && (owner->mark() == markWord::from_pointer((void*)this)); + bool print_monitor_info = (owner != nullptr) && (owner->mark() == markWord::from_pointer((void*)this)); mark_word.print_on(st, print_monitor_info); } } diff --git a/src/hotspot/share/runtime/continuationFreezeThaw.cpp b/src/hotspot/share/runtime/continuationFreezeThaw.cpp index ceef56ca751f2..0f05307862bc4 100644 --- a/src/hotspot/share/runtime/continuationFreezeThaw.cpp +++ b/src/hotspot/share/runtime/continuationFreezeThaw.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1906,7 +1906,7 @@ NOINLINE intptr_t* Thaw::thaw_fast(stackChunkOop chunk) { } // Are we thawing the last frame(s) in the continuation - const bool is_last = empty && chunk->parent() == NULL; + const bool is_last = empty && chunk->parent() == nullptr; assert(!is_last || argsize == 0, ""); log_develop_trace(continuations)("thaw_fast partial: %d is_last: %d empty: %d size: %d argsize: %d entrySP: " PTR_FORMAT, @@ -2606,7 +2606,7 @@ class ConfigResolve { template static void resolve_gc() { BarrierSet* bs = BarrierSet::barrier_set(); - assert(bs != NULL, "freeze/thaw invoked before BarrierSet is set"); + assert(bs != nullptr, "freeze/thaw invoked before BarrierSet is set"); switch (bs->kind()) { #define BARRIER_SET_RESOLVE_BARRIER_CLOSURE(bs_name) \ case BarrierSet::bs_name: { \ diff --git a/src/hotspot/share/runtime/deoptimization.cpp b/src/hotspot/share/runtime/deoptimization.cpp index 5de1506968942..5eafb2281d68f 100644 --- a/src/hotspot/share/runtime/deoptimization.cpp +++ b/src/hotspot/share/runtime/deoptimization.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
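One more aside on the UNSUPPORTED_OPTION_NULL hunk in arguments.hpp above: the realigned backslashes change nothing semantically, and the do { ... } while(0) wrapper is what makes the macro expand to a single statement. A hedged sketch of that idiom (WARN_IF is a made-up macro, not from HotSpot):

#include <cstdio>

#define WARN_IF(cond, msg)                 \
do {                                       \
  if (cond) {                              \
    std::fprintf(stderr, "%s\n", msg);     \
  }                                        \
} while (0)

int main() {
  if (sizeof(void*) == 4)
    WARN_IF(true, "32-bit pointers");   // expands to exactly one statement
  else
    std::puts("64-bit pointers");       // so this else still parses correctly
  return 0;
}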
* * This code is free software; you can redistribute it and/or modify it @@ -208,7 +208,7 @@ static bool rematerialize_objects(JavaThread* thread, int exec_mode, CompiledMet frame& deoptee, RegisterMap& map, GrowableArray* chunk, bool& deoptimized_objects) { bool realloc_failures = false; - assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames"); + assert (chunk->at(0)->scope() != nullptr,"expect only compiled java frames"); JavaThread* deoptee_thread = chunk->at(0)->thread(); assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread), @@ -239,7 +239,7 @@ static bool rematerialize_objects(JavaThread* thread, int exec_mode, CompiledMet tty->cr(); } } - if (objects != NULL) { + if (objects != nullptr) { if (exec_mode == Deoptimization::Unpack_none) { assert(thread->thread_state() == _thread_in_vm, "assumption"); JavaThread* THREAD = thread; // For exception macros. @@ -252,7 +252,7 @@ static bool rematerialize_objects(JavaThread* thread, int exec_mode, CompiledMet realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD); JRT_END } - bool skip_internal = (compiled_method != NULL) && !compiled_method->is_compiled_by_jvmci(); + bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci(); Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal); if (TraceDeoptimization) { print_objects(deoptee_thread, objects, realloc_failures); @@ -276,7 +276,7 @@ static void restore_eliminated_locks(JavaThread* thread, GrowableArraylength(); i++) { compiledVFrame* cvf = chunk->at(i); - assert (cvf->scope() != NULL,"expect only compiled java frames"); + assert (cvf->scope() != nullptr,"expect only compiled java frames"); GrowableArray* monitors = cvf->monitors(); if (monitors->is_nonempty()) { bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee, @@ -295,7 +295,7 @@ static void restore_eliminated_locks(JavaThread* thread, GrowableArraycurrent_waiting_monitor(); - if (monitor != NULL && monitor->object() == mi->owner()) { + if (monitor != nullptr && monitor->object() == mi->owner()) { st.print_cr(" object <" INTPTR_FORMAT "> DEFERRED relocking after wait", p2i(mi->owner())); continue; } @@ -358,7 +358,7 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread // Allocate our special deoptimization ResourceMark DeoptResourceMark* dmark = new DeoptResourceMark(current); - assert(current->deopt_mark() == NULL, "Pending deopt!"); + assert(current->deopt_mark() == nullptr, "Pending deopt!"); current->set_deopt_mark(dmark); frame stub_frame = current->last_frame(); // Makes stack walkable as side effect @@ -373,7 +373,7 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread // Now get the deoptee with a valid map frame deoptee = stub_frame.sender(&map); // Set the deoptee nmethod - assert(current->deopt_compiled_method() == NULL, "Pending deopt!"); + assert(current->deopt_compiled_method() == nullptr, "Pending deopt!"); CompiledMethod* cm = deoptee.cb()->as_compiled_method_or_null(); current->set_deopt_compiled_method(cm); @@ -432,10 +432,10 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread #endif // !PRODUCT GrowableArray* expressions = trap_scope->expressions(); - guarantee(expressions != NULL && expressions->length() > 0, "must have exception to throw"); + guarantee(expressions != nullptr && expressions->length() > 0, "must have exception to throw"); 
ScopeValue* topOfStack = expressions->top(); exceptionObject = StackValue::create_stack_value(&deoptee, &map, topOfStack)->get_obj(); - guarantee(exceptionObject() != NULL, "exception oop can not be null"); + guarantee(exceptionObject() != nullptr, "exception oop can not be null"); } vframeArray* array = create_vframeArray(current, deoptee, &map, chunk, realloc_failures); @@ -447,7 +447,7 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread } #endif - assert(current->vframe_array_head() == NULL, "Pending deopt!"); + assert(current->vframe_array_head() == nullptr, "Pending deopt!"); current->set_vframe_array_head(array); // Now that the vframeArray has been created if we have any deferred local writes @@ -465,7 +465,7 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread // If the deopt call site is a MethodHandle invoke call site we have // to adjust the unpack_sp. nmethod* deoptee_nm = deoptee.cb()->as_nmethod_or_null(); - if (deoptee_nm != NULL && deoptee_nm->is_method_handle_return(deoptee.pc())) + if (deoptee_nm != nullptr && deoptee_nm->is_method_handle_return(deoptee.pc())) unpack_sp = deoptee.unextended_sp(); #ifdef ASSERT @@ -604,10 +604,10 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread ContinuationEntry::from_frame(deopt_sender)->set_argsize(0); } - assert(CodeCache::find_blob(frame_pcs[0]) != NULL, "bad pc"); + assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc"); #if INCLUDE_JVMCI - if (exceptionObject() != NULL) { + if (exceptionObject() != nullptr) { current->set_exception_oop(exceptionObject()); exec_mode = Unpack_exception; } @@ -655,18 +655,18 @@ void Deoptimization::cleanup_deopt_info(JavaThread *thread, vframeArray *array) { // Get array if coming from exception - if (array == NULL) { + if (array == nullptr) { array = thread->vframe_array_head(); } - thread->set_vframe_array_head(NULL); + thread->set_vframe_array_head(nullptr); // Free the previous UnrollBlock vframeArray* old_array = thread->vframe_array_last(); thread->set_vframe_array_last(array); - if (old_array != NULL) { + if (old_array != nullptr) { UnrollBlock* old_info = old_array->unroll_block(); - old_array->set_unroll_block(NULL); + old_array->set_unroll_block(nullptr); delete old_info; delete old_array; } @@ -675,8 +675,8 @@ void Deoptimization::cleanup_deopt_info(JavaThread *thread, // inside the vframeArray (StackValueCollections) delete thread->deopt_mark(); - thread->set_deopt_mark(NULL); - thread->set_deopt_compiled_method(NULL); + thread->set_deopt_mark(nullptr); + thread->set_deopt_compiled_method(nullptr); if (JvmtiExport::can_pop_frame()) { @@ -755,7 +755,7 @@ JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_m // clear it to make sure JFR understands not to try and walk stacks from events // in here. intptr_t* sp = thread->frame_anchor()->last_Java_sp(); - thread->frame_anchor()->set_last_Java_sp(NULL); + thread->frame_anchor()->set_last_Java_sp(nullptr); // Unpack the interpreter frames and any adapter frame (c2 only) we might create. 
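The cleanup_deopt_info hunk above follows a detach-then-delete discipline: the owning field is set to nullptr before the old objects are freed, so no stale pointer survives the teardown. A standalone sketch of that ordering (the types are invented for illustration):

struct UnrollInfo { /* payload elided */ };

struct VFrameArrayLike {
  UnrollInfo* _unroll = nullptr;
  void set_unroll(UnrollInfo* u) { _unroll = u; }
  UnrollInfo* unroll() const { return _unroll; }
};

int main() {
  VFrameArrayLike* old_array = new VFrameArrayLike();
  old_array->set_unroll(new UnrollInfo());

  UnrollInfo* old_info = old_array->unroll();
  old_array->set_unroll(nullptr);   // null the field first, as above
  delete old_info;                  // then free both objects
  delete old_array;
  return 0;
}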
array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters()); @@ -919,7 +919,7 @@ void Deoptimization::deoptimize_all_marked(nmethod* nmethod_only) { ResourceMark rm; // Make the dependent methods not entrant - if (nmethod_only != NULL) { + if (nmethod_only != nullptr) { nmethod_only->mark_for_deoptimization(); nmethod_only->make_not_entrant(); CodeCache::make_nmethod_deoptimized(nmethod_only); @@ -946,7 +946,7 @@ class BoxCacheBase : public CHeapObj { ResourceMark rm(thread); char* klass_name_str = klass_name->as_C_string(); InstanceKlass* ik = SystemDictionary::find_instance_klass(thread, klass_name, Handle(), Handle()); - guarantee(ik != NULL, "%s must be loaded", klass_name_str); + guarantee(ik != nullptr, "%s must be loaded", klass_name_str); guarantee(ik->is_initialized(), "%s must be initialized", klass_name_str); CacheType::compute_offsets(ik); return ik; @@ -972,7 +972,7 @@ template class Box } public: static BoxCache* singleton(Thread* thread) { - if (_singleton == NULL) { + if (_singleton == nullptr) { BoxCache* s = new BoxCache(thread); if (!Atomic::replace_if_null(&_singleton, s)) { delete s; @@ -985,7 +985,7 @@ template class Box int offset = value - _low; return objArrayOop(JNIHandles::resolve_non_null(_cache))->obj_at(offset); } - return NULL; + return nullptr; } oop lookup_raw(intptr_t raw_value) { // Have to cast to avoid little/big-endian problems. @@ -1004,11 +1004,11 @@ typedef BoxCache typedef BoxCache ShortBoxCache; typedef BoxCache ByteBoxCache; -template<> BoxCache* BoxCache::_singleton = NULL; -template<> BoxCache* BoxCache::_singleton = NULL; -template<> BoxCache* BoxCache::_singleton = NULL; -template<> BoxCache* BoxCache::_singleton = NULL; -template<> BoxCache* BoxCache::_singleton = NULL; +template<> BoxCache* BoxCache::_singleton = nullptr; +template<> BoxCache* BoxCache::_singleton = nullptr; +template<> BoxCache* BoxCache::_singleton = nullptr; +template<> BoxCache* BoxCache::_singleton = nullptr; +template<> BoxCache* BoxCache::_singleton = nullptr; class BooleanBoxCache : public BoxCacheBase { jobject _true_cache; @@ -1026,7 +1026,7 @@ class BooleanBoxCache : public BoxCacheBase { } public: static BooleanBoxCache* singleton(Thread* thread) { - if (_singleton == NULL) { + if (_singleton == nullptr) { BooleanBoxCache* s = new BooleanBoxCache(thread); if (!Atomic::replace_if_null(&_singleton, s)) { delete s; @@ -1047,7 +1047,7 @@ class BooleanBoxCache : public BoxCacheBase { } }; -BooleanBoxCache* BooleanBoxCache::_singleton = NULL; +BooleanBoxCache* BooleanBoxCache::_singleton = nullptr; oop Deoptimization::get_cached_box(AutoBoxObjectValue* bv, frame* fr, RegisterMap* reg_map, TRAPS) { Klass* k = java_lang_Class::as_Klass(bv->klass()->as_ConstantOopReadValue()->value()()); @@ -1064,7 +1064,7 @@ oop Deoptimization::get_cached_box(AutoBoxObjectValue* bv, frame* fr, RegisterMa default:; } } - return NULL; + return nullptr; } #endif // INCLUDE_JVMCI @@ -1082,7 +1082,7 @@ bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* ObjectValue* sv = (ObjectValue*) objects->at(i); Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()()); - oop obj = NULL; + oop obj = nullptr; if (k->is_instance_klass()) { #if INCLUDE_JVMCI @@ -1090,7 +1090,7 @@ bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* if (cm->is_compiled_by_jvmci() && sv->is_auto_box()) { AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv; obj = get_cached_box(abv, fr, reg_map, THREAD); - if (obj != 
NULL) { + if (obj != nullptr) { // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it. abv->set_cached(true); } @@ -1098,7 +1098,7 @@ bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* #endif // INCLUDE_JVMCI InstanceKlass* ik = InstanceKlass::cast(k); - if (obj == NULL) { + if (obj == nullptr) { #ifdef COMPILER2 if (EnableVectorSupport && VectorSupport::is_vector(ik)) { obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD); @@ -1119,12 +1119,12 @@ bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* obj = ak->allocate(sv->field_size(), THREAD); } - if (obj == NULL) { + if (obj == nullptr) { failures = true; } assert(sv->value().is_null(), "redundant reallocation"); - assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception"); + assert(obj != nullptr || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception"); CLEAR_PENDING_EXCEPTION; sv->set_value(obj); } @@ -1326,7 +1326,7 @@ int compare(ReassignedField* left, ReassignedField* right) { static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal) { GrowableArray* fields = new GrowableArray(); InstanceKlass* ik = klass; - while (ik != NULL) { + while (ik != nullptr) { for (AllFieldStream fs(ik); !fs.done(); fs.next()) { if (!fs.access_flags().is_static() && (!skip_internal || !fs.access_flags().is_internal())) { ReassignedField field; @@ -1499,13 +1499,13 @@ bool Deoptimization::relock_objects(JavaThread* thread, GrowableArrayset_displaced_header(markWord::encode((BasicLock*) NULL)); + mark.locker()->set_displaced_header(markWord::encode((BasicLock*) nullptr)); obj->set_mark(dmw); } if (mark.has_monitor()) { // defer relocking if the deoptee thread is currently waiting for obj ObjectMonitor* waiting_monitor = deoptee_thread->current_waiting_monitor(); - if (waiting_monitor != NULL && waiting_monitor->object() == obj()) { + if (waiting_monitor != nullptr && waiting_monitor->object() == obj()) { assert(fr.is_deoptimized_frame(), "frame must be scheduled for deoptimization"); mon_info->lock()->set_displaced_header(markWord::unused_mark()); JvmtiDeferredUpdates::inc_relock_count_after_wait(deoptee_thread); @@ -1585,10 +1585,10 @@ void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray* // reallocations of synchronized objects) and be confused. 
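The BoxCache::singleton() hunks above rely on Atomic::replace_if_null for lock-free lazy initialization: each racing thread builds a candidate, one wins the install, and the losers delete theirs. A rough standalone analogue using std::atomic (Cache is a stand-in type):

#include <atomic>

struct Cache { /* boxed-value table elided */ };

static std::atomic<Cache*> g_singleton{nullptr};

Cache* singleton() {
  Cache* c = g_singleton.load();
  if (c == nullptr) {
    Cache* s = new Cache();
    if (g_singleton.compare_exchange_strong(c, s)) {
      return s;    // this thread installed its instance
    }
    delete s;      // lost the race; c now holds the winner's pointer
  }
  return c;
}

int main() { return singleton() != nullptr ? 0 : 1; }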
for (int i = 0; i < array->frames(); i++) { MonitorChunk* monitors = array->element(i)->monitors(); - if (monitors != NULL) { + if (monitors != nullptr) { for (int j = 0; j < monitors->number_of_monitors(); j++) { BasicObjectLock* src = monitors->at(j); - if (src->obj() != NULL) { + if (src->obj() != nullptr) { ObjectSynchronizer::exit(src->obj(), src->lock(), thread); } } @@ -1606,9 +1606,9 @@ void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deopt gather_statistics(reason, Action_none, Bytecodes::_illegal); - if (LogCompilation && xtty != NULL) { + if (LogCompilation && xtty != nullptr) { CompiledMethod* cm = fr.cb()->as_compiled_method_or_null(); - assert(cm != NULL, "only compiled methods can deopt"); + assert(cm != nullptr, "only compiled methods can deopt"); ttyLocker ttyl; xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc())); @@ -1662,9 +1662,9 @@ address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* compiledVFrame* cvf = compiledVFrame::cast(vf); ScopeDesc* imm_scope = cvf->scope(); MethodData* imm_mdo = get_method_data(thread, methodHandle(thread, imm_scope->method()), true); - if (imm_mdo != NULL) { - ProfileData* pdata = imm_mdo->allocate_bci_to_data(imm_scope->bci(), NULL); - if (pdata != NULL && pdata->is_BitData()) { + if (imm_mdo != nullptr) { + ProfileData* pdata = imm_mdo->allocate_bci_to_data(imm_scope->bci(), nullptr); + if (pdata != nullptr && pdata->is_BitData()) { BitData* bit_data = (BitData*) pdata; bit_data->set_exception_seen(); } @@ -1673,7 +1673,7 @@ address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* Deoptimization::deoptimize(thread, caller_frame, Deoptimization::Reason_not_compiled_exception_handler); MethodData* trap_mdo = get_method_data(thread, methodHandle(thread, cm->method()), true); - if (trap_mdo != NULL) { + if (trap_mdo != nullptr) { trap_mdo->inc_trap_count(Deoptimization::Reason_not_compiled_exception_handler); } @@ -1726,7 +1726,7 @@ Deoptimization::get_method_data(JavaThread* thread, const methodHandle& m, bool create_if_missing) { JavaThread* THREAD = thread; // For exception macros. MethodData* mdo = m()->method_data(); - if (mdo == NULL && create_if_missing && !HAS_PENDING_EXCEPTION) { + if (mdo == nullptr && create_if_missing && !HAS_PENDING_EXCEPTION) { // Build an MDO. Ignore errors like OutOfMemory; // that simply means we won't have an MDO to update. 
Method::build_profiling_method_data(m, THREAD); @@ -1813,8 +1813,8 @@ static void post_deoptimization_event(CompiledMethod* nm, int instruction, Deoptimization::DeoptReason reason, Deoptimization::DeoptAction action) { - assert(nm != NULL, "invariant"); - assert(method != NULL, "invariant"); + assert(nm != nullptr, "invariant"); + assert(method != nullptr, "invariant"); if (EventDeoptimization::is_enabled()) { static bool serializers_registered = false; if (!serializers_registered) { @@ -1970,7 +1970,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr ResourceMark rm; ttyLocker ttyl; char buf[100]; - if (xtty != NULL) { + if (xtty != nullptr) { xtty->begin_head("uncommon_trap thread='" UINTX_FORMAT "' %s", os::current_thread_id(), format_trap_request(buf, sizeof(buf), trap_request)); @@ -1981,22 +1981,22 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr #endif nm->log_identity(xtty); } - Symbol* class_name = NULL; + Symbol* class_name = nullptr; bool unresolved = false; if (unloaded_class_index >= 0) { constantPoolHandle constants (current, trap_method->constants()); if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) { class_name = constants->klass_name_at(unloaded_class_index); unresolved = true; - if (xtty != NULL) + if (xtty != nullptr) xtty->print(" unresolved='1'"); } else if (constants->tag_at(unloaded_class_index).is_symbol()) { class_name = constants->symbol_at(unloaded_class_index); } - if (xtty != NULL) + if (xtty != nullptr) xtty->name(class_name); } - if (xtty != NULL && trap_mdo != NULL && (int)reason < (int)MethodData::_trap_hist_limit) { + if (xtty != nullptr && trap_mdo != nullptr && (int)reason < (int)MethodData::_trap_hist_limit) { // Dump the relevant MDO state. // This is the deopt count for the current reason, any previous // reasons or recompiles seen at this point. @@ -2004,7 +2004,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr if (dcnt != 0) xtty->print(" count='%d'", dcnt); ProfileData* pdata = trap_mdo->bci_to_data(trap_bci); - int dos = (pdata == NULL)? 0: pdata->trap_state(); + int dos = (pdata == nullptr)? 0: pdata->trap_state(); if (dos != 0) { xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos)); if (trap_state_is_recompiled(dos)) { @@ -2014,7 +2014,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr } } } - if (xtty != NULL) { + if (xtty != nullptr) { xtty->stamp(); xtty->end_head(); } @@ -2027,7 +2027,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr #if INCLUDE_JVMCI if (nm->is_nmethod()) { const char* installed_code_name = nm->as_nmethod()->jvmci_name(); - if (installed_code_name != NULL) { + if (installed_code_name != nullptr) { st.print(" (JVMCI: installed code name=%s) ", installed_code_name); } } @@ -2042,14 +2042,14 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr , debug_id #endif ); - if (class_name != NULL) { + if (class_name != nullptr) { st.print(unresolved ? " unresolved class: " : " symbol: "); class_name->print_symbol_on(&st); } st.cr(); tty->print_raw(st.freeze()); } - if (xtty != NULL) { + if (xtty != nullptr) { // Log the precise location of the trap. 
for (ScopeDesc* sd = trap_scope; ; sd = sd->sender()) { xtty->begin_elem("jvms bci='%d'", sd->bci()); @@ -2178,8 +2178,8 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr // to use the MDO to detect hot deoptimization points and control // aggressive optimization. bool inc_recompile_count = false; - ProfileData* pdata = NULL; - if (ProfileTraps && CompilerConfig::is_c2_or_jvmci_compiler_enabled() && update_trap_state && trap_mdo != NULL) { + ProfileData* pdata = nullptr; + if (ProfileTraps && CompilerConfig::is_c2_or_jvmci_compiler_enabled() && update_trap_state && trap_mdo != nullptr) { assert(trap_mdo == get_method_data(current, profiled_method, false), "sanity"); uint this_trap_count = 0; bool maybe_prior_trap = false; @@ -2260,7 +2260,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr return; // the call did not change nmethod's state } - if (pdata != NULL) { + if (pdata != nullptr) { // Record the recompilation event, if any. int tstate0 = pdata->trap_state(); int tstate1 = trap_state_set_recompiled(tstate0, true); @@ -2273,14 +2273,14 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr // is recompiled for a reason other than RTM state change. // Assume that in new recompiled code the statistic could be different, // for example, due to different inlining. - if ((reason != Reason_rtm_state_change) && (trap_mdo != NULL) && + if ((reason != Reason_rtm_state_change) && (trap_mdo != nullptr) && UseRTMDeopt && (nm->as_nmethod()->rtm_state() != ProfileRTM)) { trap_mdo->atomic_set_rtm_state(ProfileRTM); } #endif // For code aging we count traps separately here, using make_not_entrant() // as a guard against simultaneous deopts in multiple threads. - if (reason == Reason_tenured && trap_mdo != NULL) { + if (reason == Reason_tenured && trap_mdo != nullptr) { trap_mdo->inc_tenure_traps(); } } @@ -2350,7 +2350,7 @@ Deoptimization::query_update_method_data(MethodData* trap_mdo, maybe_prior_trap = (prior_trap_count != 0); maybe_prior_recompile = (trap_mdo->decompile_count() != 0); } - ProfileData* pdata = NULL; + ProfileData* pdata = nullptr; // For reasons which are recorded per bytecode, we check per-BCI data. @@ -2360,11 +2360,11 @@ Deoptimization::query_update_method_data(MethodData* trap_mdo, // Find the profile data for this BCI. If there isn't one, // try to allocate one from the MDO's set of spares. // This will let us detect a repeated trap at this point. - pdata = trap_mdo->allocate_bci_to_data(trap_bci, reason_is_speculate(reason) ? compiled_method : NULL); + pdata = trap_mdo->allocate_bci_to_data(trap_bci, reason_is_speculate(reason) ? compiled_method : nullptr); - if (pdata != NULL) { + if (pdata != nullptr) { if (reason_is_speculate(reason) && !pdata->is_SpeculativeTrapData()) { - if (LogCompilation && xtty != NULL) { + if (LogCompilation && xtty != nullptr) { ttyLocker ttyl; // no more room for speculative traps in this MDO xtty->elem("speculative_traps_oom"); @@ -2385,7 +2385,7 @@ Deoptimization::query_update_method_data(MethodData* trap_mdo, if (tstate1 != tstate0) pdata->set_trap_state(tstate1); } else { - if (LogCompilation && xtty != NULL) { + if (LogCompilation && xtty != nullptr) { ttyLocker ttyl; // Missing MDP? Leave a small complaint in the log. 
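The uncommon-trap logging above guards every xtty use with a null test, since the XML log stream only exists when the relevant logging options are enabled. A small sketch of that optional-sink pattern (g_log is a hypothetical stand-in for xtty):

#include <cstdio>

static std::FILE* g_log = nullptr;   // null until a sink is installed

void log_trap(int bci) {
  if (g_log != nullptr) {            // every use is guarded, as above
    std::fprintf(g_log, "trap bci='%d'\n", bci);
  }
}

int main() {
  log_trap(42);      // silently dropped: no sink installed
  g_log = stderr;
  log_trap(42);      // now emitted
  return 0;
}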
xtty->elem("missing_mdp bci='%d'", trap_bci); @@ -2416,7 +2416,7 @@ Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int tr #if INCLUDE_JVMCI false, #endif - NULL, + nullptr, ignore_this_trap_count, ignore_maybe_prior_trap, ignore_maybe_prior_recompile); @@ -2640,14 +2640,14 @@ void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action, _deoptimization_hist[Reason_none][0][0] += 1; // total _deoptimization_hist[reason][0][0] += 1; // per-reason total juint* cases = _deoptimization_hist[reason][1+action]; - juint* bc_counter_addr = NULL; + juint* bc_counter_addr = nullptr; juint bc_counter = 0; // Look for an unused counter, or an exact match to this BC. if (bc != Bytecodes::_illegal) { for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) { juint* counter_addr = &cases[bc_case]; juint counter = *counter_addr; - if ((counter == 0 && bc_counter_addr == NULL) + if ((counter == 0 && bc_counter_addr == nullptr) || (Bytecodes::Code)(counter & LSB_MASK) == bc) { // this counter is either free or is already devoted to this BC bc_counter_addr = counter_addr; @@ -2655,7 +2655,7 @@ void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action, } } } - if (bc_counter_addr == NULL) { + if (bc_counter_addr == nullptr) { // Overflow, or no given bytecode. bc_counter_addr = &cases[BC_CASE_LIMIT-1]; bc_counter = (*bc_counter_addr & ~LSB_MASK); // clear LSB @@ -2672,14 +2672,14 @@ jint Deoptimization::total_deoptimization_count() { // deoptimizations with the specific 'action' or 'reason' respectively. // If both arguments are null, the method returns the total deopt count. jint Deoptimization::deoptimization_count(const char *reason_str, const char *action_str) { - if (reason_str == NULL && action_str == NULL) { + if (reason_str == nullptr && action_str == nullptr) { return total_deoptimization_count(); } juint counter = 0; for (int reason = 0; reason < Reason_LIMIT; reason++) { - if (reason_str == NULL || !strcmp(reason_str, trap_reason_name(reason))) { + if (reason_str == nullptr || !strcmp(reason_str, trap_reason_name(reason))) { for (int action = 0; action < Action_LIMIT; action++) { - if (action_str == NULL || !strcmp(action_str, trap_action_name(action))) { + if (action_str == nullptr || !strcmp(action_str, trap_action_name(action))) { juint* cases = _deoptimization_hist[reason][1+action]; for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) { counter += cases[bc_case] >> LSB_BITS; @@ -2696,7 +2696,7 @@ void Deoptimization::print_statistics() { juint account = total; if (total != 0) { ttyLocker ttyl; - if (xtty != NULL) xtty->head("statistics type='deoptimization'"); + if (xtty != nullptr) xtty->head("statistics type='deoptimization'"); tty->print_cr("Deoptimization traps recorded:"); #define PRINT_STAT_LINE(name, r) \ tty->print_cr(" %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name); @@ -2728,7 +2728,7 @@ void Deoptimization::print_statistics() { PRINT_STAT_LINE("unaccounted", account); } #undef PRINT_STAT_LINE - if (xtty != NULL) xtty->tail("statistics"); + if (xtty != nullptr) xtty->tail("statistics"); } } diff --git a/src/hotspot/share/runtime/deoptimization.hpp b/src/hotspot/share/runtime/deoptimization.hpp index 19d05bb0aa700..7fe3701b44835 100644 --- a/src/hotspot/share/runtime/deoptimization.hpp +++ b/src/hotspot/share/runtime/deoptimization.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -152,7 +152,7 @@ class Deoptimization : AllStatic { // activations using those nmethods. If an nmethod is passed as an argument then it is // marked_for_deoptimization and made not_entrant. Otherwise a scan of the code cache is done to // find all marked nmethods and they are made not_entrant. - static void deoptimize_all_marked(nmethod* nmethod_only = NULL); + static void deoptimize_all_marked(nmethod* nmethod_only = nullptr); public: // Deoptimizes a frame lazily. Deopt happens on return to the frame. diff --git a/src/hotspot/share/runtime/escapeBarrier.cpp b/src/hotspot/share/runtime/escapeBarrier.cpp index 83f0684ceaae9..5d80cd8f5e3e3 100644 --- a/src/hotspot/share/runtime/escapeBarrier.cpp +++ b/src/hotspot/share/runtime/escapeBarrier.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -54,7 +54,7 @@ bool EscapeBarrier::objs_are_deoptimized(JavaThread* thread, intptr_t* fr_id) { // first/oldest update holds the flag GrowableArrayView* list = JvmtiDeferredUpdates::deferred_locals(thread); bool result = false; - if (list != NULL) { + if (list != nullptr) { for (int i = 0; i < list->length(); i++) { if (list->at(i)->matches(fr_id)) { result = list->at(i)->objects_are_deoptimized(); @@ -88,12 +88,12 @@ bool EscapeBarrier::deoptimize_objects(int d1, int d2) { int cur_depth = 0; // Skip frames at depth < d1 - while (vf != NULL && cur_depth < d1) { + while (vf != nullptr && cur_depth < d1) { cur_depth++; vf = vf->sender(); } - while (vf != NULL && ((cur_depth <= d2) || !vf->is_entry_frame())) { + while (vf != nullptr && ((cur_depth <= d2) || !vf->is_entry_frame())) { if (vf->is_compiled_frame()) { compiledVFrame* cvf = compiledVFrame::cast(vf); // Deoptimize frame and local objects if any exist. @@ -125,7 +125,7 @@ bool EscapeBarrier::deoptimize_objects_all_threads() { for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { oop vt_oop = jt->jvmti_vthread(); // Skip virtual threads - if (vt_oop != NULL && java_lang_VirtualThread::is_instance(vt_oop)) { + if (vt_oop != nullptr && java_lang_VirtualThread::is_instance(vt_oop)) { continue; } if (jt->frames_to_pop_failed_realloc() > 0) { @@ -143,7 +143,7 @@ bool EscapeBarrier::deoptimize_objects_all_threads() { assert(jt->frame_anchor()->walkable(), "The stack of JavaThread " PTR_FORMAT " is not walkable. 
Thread state is %d", p2i(jt), jt->thread_state()); - while (vf != NULL) { + while (vf != nullptr) { if (vf->is_compiled_frame()) { compiledVFrame* cvf = compiledVFrame::cast(vf); if ((cvf->has_ea_local_in_scope() || cvf->arg_escape()) && @@ -174,8 +174,8 @@ class EscapeBarrierSuspendHandshake : public HandshakeClosure { }; void EscapeBarrier::sync_and_suspend_one() { - assert(_calling_thread != NULL, "calling thread must not be NULL"); - assert(_deoptee_thread != NULL, "deoptee thread must not be NULL"); + assert(_calling_thread != nullptr, "calling thread must not be null"); + assert(_deoptee_thread != nullptr, "deoptee thread must not be null"); assert(barrier_active(), "should not call"); // Sync with other threads that might be doing deoptimizations @@ -205,7 +205,7 @@ void EscapeBarrier::sync_and_suspend_one() { void EscapeBarrier::sync_and_suspend_all() { assert(barrier_active(), "should not call"); - assert(_calling_thread != NULL, "calling thread must not be NULL"); + assert(_calling_thread != nullptr, "calling thread must not be null"); assert(all_threads(), "sanity"); // Sync with other threads that might be doing deoptimizations @@ -306,7 +306,7 @@ static void set_objs_are_deoptimized(JavaThread* thread, intptr_t* fr_id) { GrowableArrayView* list = JvmtiDeferredUpdates::deferred_locals(thread); DEBUG_ONLY(bool found = false); - if (list != NULL) { + if (list != nullptr) { for (int i = 0; i < list->length(); i++) { if (list->at(i)->matches(fr_id)) { DEBUG_ONLY(found = true); diff --git a/src/hotspot/share/runtime/escapeBarrier.hpp b/src/hotspot/share/runtime/escapeBarrier.hpp index 67b6be39683e8..df32deef98639 100644 --- a/src/hotspot/share/runtime/escapeBarrier.hpp +++ b/src/hotspot/share/runtime/escapeBarrier.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -79,7 +79,7 @@ class EscapeBarrier : StackObj { // Revert ea based optimizations for all java threads EscapeBarrier(bool barrier_active, JavaThread* calling_thread) - : _calling_thread(calling_thread), _deoptee_thread(NULL), + : _calling_thread(calling_thread), _deoptee_thread(nullptr), _barrier_active(barrier_active && (JVMCI_ONLY(UseJVMCICompiler) NOT_JVMCI(false) COMPILER2_PRESENT(|| DoEscapeAnalysis))) { @@ -130,7 +130,7 @@ class EscapeBarrier : StackObj { } // Should revert optimizations for all threads. - bool all_threads() const { return _deoptee_thread == NULL; } + bool all_threads() const { return _deoptee_thread == nullptr; } // Current thread deoptimizes its own objects. bool self_deopt() const { return _calling_thread == _deoptee_thread; } diff --git a/src/hotspot/share/runtime/fieldDescriptor.cpp b/src/hotspot/share/runtime/fieldDescriptor.cpp index d642d65f5cb67..d5efb6ea3586c 100644 --- a/src/hotspot/share/runtime/fieldDescriptor.cpp +++ b/src/hotspot/share/runtime/fieldDescriptor.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -37,7 +37,7 @@ Symbol* fieldDescriptor::generic_signature() const { if (!has_generic_signature()) { - return NULL; + return nullptr; } int idx = 0; @@ -61,16 +61,16 @@ bool fieldDescriptor::is_trusted_final() const { AnnotationArray* fieldDescriptor::annotations() const { InstanceKlass* ik = field_holder(); Array* md = ik->fields_annotations(); - if (md == NULL) - return NULL; + if (md == nullptr) + return nullptr; return md->at(index()); } AnnotationArray* fieldDescriptor::type_annotations() const { InstanceKlass* ik = field_holder(); Array* type_annos = ik->fields_type_annotations(); - if (type_annos == NULL) - return NULL; + if (type_annos == nullptr) + return nullptr; return type_annos->at(index()); } @@ -185,17 +185,17 @@ void fieldDescriptor::print_on_for(outputStream* st, oop obj) { st->print("%s", obj->bool_field(offset()) ? "true" : "false"); break; case T_ARRAY: - if (obj->obj_field(offset()) != NULL) { + if (obj->obj_field(offset()) != nullptr) { obj->obj_field(offset())->print_value_on(st); } else { - st->print("NULL"); + st->print("nullptr"); } break; case T_OBJECT: - if (obj->obj_field(offset()) != NULL) { + if (obj->obj_field(offset()) != nullptr) { obj->obj_field(offset())->print_value_on(st); } else { - st->print("NULL"); + st->print("nullptr"); } break; default: diff --git a/src/hotspot/share/runtime/flags/debug_globals.hpp b/src/hotspot/share/runtime/flags/debug_globals.hpp index 177a4ae074b62..9f95c2fcb43ed 100644 --- a/src/hotspot/share/runtime/flags/debug_globals.hpp +++ b/src/hotspot/share/runtime/flags/debug_globals.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -59,7 +59,7 @@ range, \ constraint) \ \ - product(ccstr, DummyManageableStringFlag, NULL, MANAGEABLE, \ + product(ccstr, DummyManageableStringFlag, nullptr, MANAGEABLE, \ "Dummy flag for testing string handling in WriteableFlags") \ \ product(bool, TestFlagFor_bool, false, \ diff --git a/src/hotspot/share/runtime/flags/jvmFlag.cpp b/src/hotspot/share/runtime/flags/jvmFlag.cpp index c06616b58fd24..51c4ef2815773 100644 --- a/src/hotspot/share/runtime/flags/jvmFlag.cpp +++ b/src/hotspot/share/runtime/flags/jvmFlag.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -98,7 +98,7 @@ void JVMFlag::set_product() { assert(is_product(), "sanity"); } -// Get custom message for this locked flag, or NULL if +// Get custom message for this locked flag, or null if // none is available. Returns message type produced. JVMFlag::MsgType JVMFlag::get_locked_message(char* buf, int buflen) const { buf[0] = '\0'; @@ -225,9 +225,9 @@ void JVMFlag::print_on(outputStream* st, bool withComments, bool printRanges) co } else if (is_ccstr()) { // Honor characters in ccstr: print multiple lines. 
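// Reviewer aside (not from the patch): the ccstr branch below walks
// embedded '\n' characters with strchr; a standalone sketch of that scan:
#include <cstdio>
#include <cstring>

void print_lines(const char* cp) {
  const char* eol;
  while ((eol = std::strchr(cp, '\n')) != nullptr) {
    std::printf("%.*s\n", (int)(eol - cp), cp);  // segment before the '\n'
    cp = eol + 1;                                // continue past it
  }
  std::printf("%s\n", cp);                       // trailing segment
}

int main() {
  print_lines("first\nsecond\nthird");
  return 0;
}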
const char* cp = get_ccstr(); - if (cp != NULL) { + if (cp != nullptr) { const char* eol; - while ((eol = strchr(cp, '\n')) != NULL) { + while ((eol = strchr(cp, '\n')) != nullptr) { size_t llen = pointer_delta(eol, cp, sizeof(char)); st->print("%.*s", (int)llen, cp); st->cr(); @@ -429,7 +429,7 @@ void JVMFlag::print_as_flag(outputStream* st) const { } else if (is_ccstr()) { st->print("-XX:%s=", _name); const char* cp = get_ccstr(); - if (cp != NULL) { + if (cp != nullptr) { // Need to turn embedded '\n's back into separate arguments // Not so efficient to print one character at a time, // but the choice is to do the transformation to a buffer @@ -460,7 +460,7 @@ const char* JVMFlag::flag_error_str(JVMFlag::Error error) { case JVMFlag::INVALID_FLAG: return "INVALID_FLAG"; case JVMFlag::ERR_OTHER: return "ERR_OTHER"; case JVMFlag::SUCCESS: return "SUCCESS"; - default: ShouldNotReachHere(); return "NULL"; + default: ShouldNotReachHere(); return "nullptr"; } } @@ -543,7 +543,7 @@ const int EXPERIMENTAL = JVMFlag::KIND_EXPERIMENTAL; static JVMFlag flagTable[NUM_JVMFlagsEnum + 1] = { MATERIALIZE_ALL_FLAGS - JVMFlag() // The iteration code wants a flag with a NULL name at the end of the table. + JVMFlag() // The iteration code wants a flag with a null name at the end of the table. }; // We want flagTable[] to be completely initialized at C++ compilation time, which requires @@ -572,33 +572,33 @@ const int JVMFlag::type_signatures[] = { // Search the flag table for a named flag JVMFlag* JVMFlag::find_flag(const char* name, size_t length, bool allow_locked, bool return_flag) { JVMFlag* flag = JVMFlagLookup::find(name, length); - if (flag != NULL) { + if (flag != nullptr) { // Found a matching entry. // Don't report notproduct and develop flags in product builds. if (flag->is_constant_in_binary()) { - return (return_flag ? flag : NULL); + return (return_flag ? flag : nullptr); } // Report locked flags only if allowed. if (!(flag->is_unlocked() || flag->is_unlocker())) { if (!allow_locked) { // disable use of locked flags, e.g. diagnostic, experimental, // etc. until they are explicitly unlocked - return NULL; + return nullptr; } } return flag; } // JVMFlag name is not in the flag table - return NULL; + return nullptr; } JVMFlag* JVMFlag::fuzzy_match(const char* name, size_t length, bool allow_locked) { float VMOptionsFuzzyMatchSimilarity = 0.7f; - JVMFlag* match = NULL; + JVMFlag* match = nullptr; float score; float max_score = -1; - for (JVMFlag* current = &flagTable[0]; current->_name != NULL; current++) { + for (JVMFlag* current = &flagTable[0]; current->_name != nullptr; current++) { score = StringUtils::similarity(current->_name, strlen(current->_name), name, length); if (score > max_score) { max_score = score; @@ -606,18 +606,18 @@ JVMFlag* JVMFlag::fuzzy_match(const char* name, size_t length, bool allow_locked } } - if (match == NULL) { - return NULL; + if (match == nullptr) { + return nullptr; } if (!(match->is_unlocked() || match->is_unlocker())) { if (!allow_locked) { - return NULL; + return nullptr; } } if (max_score < VMOptionsFuzzyMatchSimilarity) { - return NULL; + return nullptr; } return match; @@ -690,7 +690,7 @@ void JVMFlag::assert_valid_flag_enum(JVMFlagsEnum i) { } void JVMFlag::check_all_flag_declarations() { - for (JVMFlag* current = &flagTable[0]; current->_name != NULL; current++) { + for (JVMFlag* current = &flagTable[0]; current->_name != nullptr; current++) { int flags = static_cast(current->_flags); // Backwards compatibility. 
This will be relaxed/removed in JDK-7123237. int mask = JVMFlag::KIND_DIAGNOSTIC | JVMFlag::KIND_MANAGEABLE | JVMFlag::KIND_EXPERIMENTAL; @@ -728,7 +728,7 @@ void JVMFlag::printFlags(outputStream* out, bool withComments, bool printRanges, // Sort JVMFlag** array = NEW_C_HEAP_ARRAY_RETURN_NULL(JVMFlag*, length, mtArguments); - if (array != NULL) { + if (array != nullptr) { for (size_t i = 0; i < length; i++) { array[i] = &flagTable[i]; } diff --git a/src/hotspot/share/runtime/flags/jvmFlagAccess.cpp b/src/hotspot/share/runtime/flags/jvmFlagAccess.cpp index 9baf90753cb31..3ecaa70a95d80 100644 --- a/src/hotspot/share/runtime/flags/jvmFlagAccess.cpp +++ b/src/hotspot/share/runtime/flags/jvmFlagAccess.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -63,7 +63,7 @@ class TypedFlagAccessImpl : public FlagAccessImpl { JVMFlag::Error check_constraint_and_set(JVMFlag* flag, void* value_addr, JVMFlagOrigin origin, bool verbose) const { T value = *((T*)value_addr); const JVMTypedFlagLimit* constraint = (const JVMTypedFlagLimit*)JVMFlagLimit::get_constraint(flag); - if (constraint != NULL && constraint->phase() <= static_cast(JVMFlagLimit::validating_phase())) { + if (constraint != nullptr && constraint->phase() <= static_cast(JVMFlagLimit::validating_phase())) { JVMFlag::Error err = typed_check_constraint(constraint->constraint_func(), value, verbose); if (err != JVMFlag::SUCCESS) { return err; @@ -107,7 +107,7 @@ class RangedFlagAccessImpl : public TypedFlagAccessImpl { bool verbose = JVMFlagLimit::verbose_checks_needed(); const JVMTypedFlagLimit* range = (const JVMTypedFlagLimit*)JVMFlagLimit::get_range(flag); - if (range != NULL) { + if (range != nullptr) { if ((value < range->min()) || (value > range->max())) { range_error(flag->name(), value, range->min(), range->max(), verbose); return JVMFlag::OUT_OF_BOUNDS; @@ -119,7 +119,7 @@ class RangedFlagAccessImpl : public TypedFlagAccessImpl { virtual JVMFlag::Error check_range(const JVMFlag* flag, bool verbose) const { const JVMTypedFlagLimit* range = (const JVMTypedFlagLimit*)JVMFlagLimit::get_range(flag); - if (range != NULL) { + if (range != nullptr) { T value = flag->read(); if ((value < range->min()) || (value > range->max())) { range_error(flag->name(), value, range->min(), range->max(), verbose); @@ -301,16 +301,16 @@ JVMFlag::Error JVMFlagAccess::set_impl(JVMFlag* flag, void* value, JVMFlagOrigin } JVMFlag::Error JVMFlagAccess::set_ccstr(JVMFlag* flag, ccstr* value, JVMFlagOrigin origin) { - if (flag == NULL) return JVMFlag::INVALID_FLAG; + if (flag == nullptr) return JVMFlag::INVALID_FLAG; if (!flag->is_ccstr()) return JVMFlag::WRONG_FORMAT; ccstr old_value = flag->get_ccstr(); trace_flag_changed(flag, old_value, *value, origin); - char* new_value = NULL; - if (*value != NULL) { + char* new_value = nullptr; + if (*value != nullptr) { new_value = os::strdup_check_oom(*value); } flag->set_ccstr(new_value); - if (!flag->is_default() && old_value != NULL) { + if (!flag->is_default() && old_value != nullptr) { // Old value is heap allocated so free it. FREE_C_HEAP_ARRAY(char, old_value); } @@ -318,7 +318,7 @@ JVMFlag::Error JVMFlagAccess::set_ccstr(JVMFlag* flag, ccstr* value, JVMFlagOrig // The callers typically don't care what the old value is. 
// If the caller really wants to know the old value, read it (and make a copy if necessary) // before calling this API. - *value = NULL; + *value = nullptr; flag->set_origin(origin); return JVMFlag::SUCCESS; } @@ -355,11 +355,11 @@ void JVMFlagAccess::print_range(outputStream* st, const JVMFlag* flag, const JVM void JVMFlagAccess::print_range(outputStream* st, const JVMFlag* flag) { const JVMFlagLimit* range = JVMFlagLimit::get_range(flag); - if (range != NULL) { + if (range != nullptr) { print_range(st, flag, range); } else { const JVMFlagLimit* limit = JVMFlagLimit::get_constraint(flag); - if (limit != NULL) { + if (limit != nullptr) { void* func = limit->constraint_func(); // Two special cases where the lower limit of the range is defined by an os:: function call diff --git a/src/hotspot/share/runtime/flags/jvmFlagAccess.hpp b/src/hotspot/share/runtime/flags/jvmFlagAccess.hpp index e70792f0f3514..c9adc3433f15e 100644 --- a/src/hotspot/share/runtime/flags/jvmFlagAccess.hpp +++ b/src/hotspot/share/runtime/flags/jvmFlagAccess.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,7 +41,7 @@ class outputStream; // /* If you use a wrong type, a run-time assertion will happen */ // intx v = flag->read(); // -// /* If you use a wrong type, or a NULL flag, an error code is returned */ +// /* If you use a wrong type, or a null flag, an error code is returned */ // JVMFlag::Error err = JVMFlagAccess::get(flag, &v, origin); #define JVM_FLAG_TYPE(t) \ @@ -80,7 +80,7 @@ class JVMFlagAccess : AllStatic { // set(flag, double_ptr); assert(JVMFlag::is_compatible_type(type_enum), "must be"); - if (flag == NULL) { + if (flag == nullptr) { return JVMFlag::INVALID_FLAG; } if (!is_correct_type(flag, type_enum)) { @@ -110,7 +110,7 @@ class JVMFlagAccess : AllStatic { // set(flag, double_ptr); assert(JVMFlag::is_compatible_type(type_enum), "must be"); - if (flag == NULL) { + if (flag == nullptr) { return JVMFlag::INVALID_FLAG; } if (!is_correct_type(flag, type_enum)) { diff --git a/src/hotspot/share/runtime/flags/jvmFlagLimit.cpp b/src/hotspot/share/runtime/flags/jvmFlagLimit.cpp index bcc1565e165bd..e73f4a7585650 100644 --- a/src/hotspot/share/runtime/flags/jvmFlagLimit.cpp +++ b/src/hotspot/share/runtime/flags/jvmFlagLimit.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -66,14 +66,14 @@ struct DummyLimit { template class LimitGetter { public: - // These functions return NULL for develop flags in a PRODUCT build + // These functions return null for develop flags in a PRODUCT build static constexpr const JVMFlagLimit* no_limit(...) { - return NULL; + return nullptr; } // This is for flags that have neither range no constraint. We don't need the JVMFlagLimit struct. 
static constexpr const JVMFlagLimit* get_limit(const JVMTypedFlagLimit* p, int dummy) { - return NULL; + return nullptr; } static constexpr const JVMFlagLimit* get_limit(const JVMTypedFlagLimit* p, int dummy, T min, T max) { @@ -123,7 +123,7 @@ constexpr JVMTypedFlagLimit limit_dummy static constexpr const JVMFlagLimit* const flagLimitTable[1 + NUM_JVMFlagsEnum] = { // Because FLAG_LIMIT_PTR must start with an "),", we have to place a dummy element here. - LimitGetter::get_limit(NULL, 0 + LimitGetter::get_limit(nullptr, 0 #ifdef PRODUCT ALL_FLAGS(FLAG_LIMIT_PTR_NONE, @@ -154,7 +154,7 @@ const JVMFlag* JVMFlagLimit::last_checked_flag() { if (_last_checked != INVALID_JVMFlagsEnum) { return JVMFlag::flag_from_enum(_last_checked); } else { - return NULL; + return nullptr; } } @@ -162,7 +162,7 @@ bool JVMFlagLimit::check_all_ranges() { bool status = true; for (int i = 0; i < NUM_JVMFlagsEnum; i++) { JVMFlagsEnum flag_enum = static_cast(i); - if (get_range_at(flag_enum) != NULL && + if (get_range_at(flag_enum) != nullptr && JVMFlagAccess::check_range(JVMFlag::flag_from_enum(flag_enum), true) != JVMFlag::SUCCESS) { status = false; } @@ -179,7 +179,7 @@ bool JVMFlagLimit::check_all_constraints(JVMFlagConstraintPhase phase) { for (int i = 0; i < NUM_JVMFlagsEnum; i++) { JVMFlagsEnum flag_enum = static_cast(i); const JVMFlagLimit* constraint = get_constraint_at(flag_enum); - if (constraint != NULL && constraint->phase() == static_cast(phase) && + if (constraint != nullptr && constraint->phase() == static_cast(phase) && JVMFlagAccess::check_constraint(JVMFlag::flag_from_enum(flag_enum), constraint->constraint_func(), true) != JVMFlag::SUCCESS) { status = false; diff --git a/src/hotspot/share/runtime/flags/jvmFlagLimit.hpp b/src/hotspot/share/runtime/flags/jvmFlagLimit.hpp index 482fd292af54b..f8e5847428408 100644 --- a/src/hotspot/share/runtime/flags/jvmFlagLimit.hpp +++ b/src/hotspot/share/runtime/flags/jvmFlagLimit.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -58,12 +58,12 @@ template class JVMTypedFlagLimit; // To query the range information of a JVMFlag: // JVMFlagLimit::get_range(JVMFlag*) // JVMFlagLimit::get_range_at(int flag_enum) -// If the given flag doesn't have a range, NULL is returned. +// If the given flag doesn't have a range, null is returned. // // To query the constraint information of a JVMFlag: // JVMFlagLimit::get_constraint(JVMFlag*) // JVMFlagLimit::get_constraint_at(int flag_enum) -// If the given flag doesn't have a constraint, NULL is returned. +// If the given flag doesn't have a constraint, null is returned. 
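As the comment block above spells out, the absence of a range or constraint is encoded as a null return, so every caller tests before dereferencing. A hypothetical miniature of that convention (Limit and kLimits are invented for illustration):

struct Limit { int min; int max; };

static const Limit  kHeapRange = { 16, 4096 };
static const Limit* kLimits[]  = { &kHeapRange, nullptr };  // flag #1 has no range

const Limit* get_range_at(int flag_enum) {
  return kLimits[flag_enum];        // may legitimately be nullptr
}

int main() {
  const Limit* r = get_range_at(1);
  if (r != nullptr) {
    return (16 >= r->min && 16 <= r->max) ? 0 : 1;  // dereference only after the test
  }
  return 0;   // no range registered for this flag
}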
class JVMFlagLimit { short _constraint_func; @@ -85,11 +85,11 @@ class JVMFlagLimit { private: static const JVMFlagLimit* get_kind_at(JVMFlagsEnum flag_enum, int required_kind) { const JVMFlagLimit* limit = at(flag_enum); - if (limit != NULL && (limit->_kind & required_kind) != 0) { + if (limit != nullptr && (limit->_kind & required_kind) != 0) { _last_checked = flag_enum; return limit; } else { - return NULL; + return nullptr; } } diff --git a/src/hotspot/share/runtime/flags/jvmFlagLookup.cpp b/src/hotspot/share/runtime/flags/jvmFlagLookup.cpp index fbdfcf000fd65..adcd381206153 100644 --- a/src/hotspot/share/runtime/flags/jvmFlagLookup.cpp +++ b/src/hotspot/share/runtime/flags/jvmFlagLookup.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -70,7 +70,7 @@ JVMFlag* JVMFlagLookup::find_impl(const char* name, size_t length) const { flag_enum = (int)_table[flag_enum]; } - return NULL; + return nullptr; } JVMFlag* JVMFlagLookup::find(const char* name, size_t length) { diff --git a/src/hotspot/share/runtime/frame.cpp b/src/hotspot/share/runtime/frame.cpp index 749eb625b3b2a..37bacf9864d65 100644 --- a/src/hotspot/share/runtime/frame.cpp +++ b/src/hotspot/share/runtime/frame.cpp @@ -66,27 +66,27 @@ RegisterMap::RegisterMap(JavaThread *thread, UpdateMap update_map, ProcessFrames _process_frames = process_frames == ProcessFrames::include; _walk_cont = walk_cont == WalkContinuation::include; clear(); - DEBUG_ONLY (_update_for_id = NULL;) + DEBUG_ONLY (_update_for_id = nullptr;) NOT_PRODUCT(_skip_missing = false;) NOT_PRODUCT(_async = false;) - if (walk_cont == WalkContinuation::include && thread != NULL && thread->last_continuation() != NULL) { + if (walk_cont == WalkContinuation::include && thread != nullptr && thread->last_continuation() != nullptr) { _chunk = stackChunkHandle(Thread::current()->handle_area()->allocate_null_handle(), true /* dummy */); } _chunk_index = -1; #ifndef PRODUCT - for (int i = 0; i < reg_count ; i++ ) _location[i] = NULL; + for (int i = 0; i < reg_count ; i++ ) _location[i] = nullptr; #endif /* PRODUCT */ } RegisterMap::RegisterMap(oop continuation, UpdateMap update_map) { - _thread = NULL; + _thread = nullptr; _update_map = update_map == UpdateMap::include; _process_frames = false; _walk_cont = true; clear(); - DEBUG_ONLY (_update_for_id = NULL;) + DEBUG_ONLY (_update_for_id = nullptr;) NOT_PRODUCT(_skip_missing = false;) NOT_PRODUCT(_async = false;) @@ -94,13 +94,13 @@ RegisterMap::RegisterMap(oop continuation, UpdateMap update_map) { _chunk_index = -1; #ifndef PRODUCT - for (int i = 0; i < reg_count ; i++ ) _location[i] = NULL; + for (int i = 0; i < reg_count ; i++ ) _location[i] = nullptr; #endif /* PRODUCT */ } RegisterMap::RegisterMap(const RegisterMap* map) { assert(map != this, "bad initialization parameter"); - assert(map != NULL, "RegisterMap must be present"); + assert(map != nullptr, "RegisterMap must be present"); _thread = map->thread(); _update_map = map->update_map(); _process_frames = map->process_frames(); @@ -134,16 +134,16 @@ RegisterMap::RegisterMap(const RegisterMap* map) { } oop RegisterMap::cont() const { - return _chunk() != NULL ? _chunk()->cont() : (oop)NULL; + return _chunk() != nullptr ? 
_chunk()->cont() : (oop)nullptr; } void RegisterMap::set_stack_chunk(stackChunkOop chunk) { - assert(chunk == NULL || _walk_cont, ""); - assert(chunk == NULL || _chunk.not_null(), ""); + assert(chunk == nullptr || _walk_cont, ""); + assert(chunk == nullptr || _chunk.not_null(), ""); if (_chunk.is_null()) return; log_trace(continuations)("set_stack_chunk: " INTPTR_FORMAT " this: " INTPTR_FORMAT, p2i((oopDesc*)chunk), p2i(this)); _chunk.replace(chunk); // reuse handle. see comment above in the constructor - if (chunk == NULL) { + if (chunk == nullptr) { _chunk_index = -1; } else { _chunk_index++; @@ -169,7 +169,7 @@ VMReg RegisterMap::find_register_spilled_here(void* p, intptr_t* sp) { VMReg r = VMRegImpl::as_VMReg(i); if (p == location(r, sp)) return r; } - return NULL; + return nullptr; } void RegisterMap::print_on(outputStream* st) const { @@ -178,7 +178,7 @@ void RegisterMap::print_on(outputStream* st) const { VMReg r = VMRegImpl::as_VMReg(i); intptr_t* src = (intptr_t*) location(r, nullptr); - if (src != NULL) { + if (src != nullptr) { r->print_on(st); st->print(" [" INTPTR_FORMAT "] = ", p2i(src)); @@ -219,7 +219,7 @@ address frame::raw_pc() const { // void frame::set_pc(address newpc) { #ifdef ASSERT - if (_cb != NULL && _cb->is_nmethod()) { + if (_cb != nullptr && _cb->is_nmethod()) { assert(!((nmethod*)_cb)->is_deopt_pc(_pc), "invariant violation"); } #endif // ASSERT @@ -237,7 +237,7 @@ bool frame::is_ignored_frame() const { } bool frame::is_native_frame() const { - return (_cb != NULL && + return (_cb != nullptr && _cb->is_nmethod() && ((nmethod*)_cb)->is_native_method()); } @@ -249,11 +249,11 @@ bool frame::is_java_frame() const { } bool frame::is_runtime_frame() const { - return (_cb != NULL && _cb->is_runtime_stub()); + return (_cb != nullptr && _cb->is_runtime_stub()); } bool frame::is_safepoint_blob_frame() const { - return (_cb != NULL && _cb->is_safepoint_stub()); + return (_cb != nullptr && _cb->is_safepoint_stub()); } // testers @@ -286,7 +286,7 @@ JavaCallWrapper* frame::entry_frame_call_wrapper_if_safe(JavaThread* thread) con return *jcw; } - return NULL; + return nullptr; } bool frame::is_entry_frame_valid(JavaThread* thread) const { @@ -304,7 +304,7 @@ bool frame::is_entry_frame_valid(JavaThread* thread) const { bool frame::should_be_deoptimized() const { if (_deopt_state == is_deoptimized || !is_compiled_frame() ) return false; - assert(_cb != NULL && _cb->is_compiled(), "must be an nmethod"); + assert(_cb != nullptr && _cb->is_compiled(), "must be an nmethod"); CompiledMethod* nm = (CompiledMethod *)_cb; if (TraceDependencies) { tty->print("checking (%s) ", nm->is_marked_for_deoptimization() ? "true" : "false"); @@ -331,11 +331,11 @@ bool frame::can_be_deoptimized() const { } void frame::deoptimize(JavaThread* thread) { - assert(thread == NULL + assert(thread == nullptr || (thread->frame_anchor()->has_last_Java_frame() && thread->frame_anchor()->walkable()), "must be"); // Schedule deoptimization of an nmethod activation with this frame. - assert(_cb != NULL && _cb->is_compiled(), "must be"); + assert(_cb != nullptr && _cb->is_compiled(), "must be"); // If the call site is a MethodHandle call site use the MH deopt handler. 
CompiledMethod* cm = (CompiledMethod*) _cb; @@ -351,7 +351,7 @@ void frame::deoptimize(JavaThread* thread) { assert(is_deoptimized_frame(), "must be"); #ifdef ASSERT - if (thread != NULL) { + if (thread != nullptr) { frame check = thread->last_frame(); if (is_older(check.id())) { RegisterMap map(thread, @@ -495,16 +495,16 @@ const char* frame::print_name() const { if (is_deoptimized_frame()) return "Deoptimized"; return "Compiled"; } - if (sp() == NULL) return "Empty"; + if (sp() == nullptr) return "Empty"; return "C"; } void frame::print_value_on(outputStream* st, JavaThread *thread) const { NOT_PRODUCT(address begin = pc()-40;) - NOT_PRODUCT(address end = NULL;) + NOT_PRODUCT(address end = nullptr;) st->print("%s frame (sp=" INTPTR_FORMAT " unextended sp=" INTPTR_FORMAT, print_name(), p2i(sp()), p2i(unextended_sp())); - if (sp() != NULL) + if (sp() != nullptr) st->print(", fp=" INTPTR_FORMAT ", real_fp=" INTPTR_FORMAT ", pc=" INTPTR_FORMAT, p2i(fp()), p2i(real_fp()), p2i(pc())); st->print_cr(")"); @@ -515,7 +515,7 @@ void frame::print_value_on(outputStream* st, JavaThread *thread) const { NOT_PRODUCT(begin = desc->begin(); end = desc->end();) } else if (Interpreter::contains(pc())) { InterpreterCodelet* desc = Interpreter::codelet_containing(pc()); - if (desc != NULL) { + if (desc != nullptr) { st->print("~"); desc->print_on(st); NOT_PRODUCT(begin = desc->code_begin(); end = desc->code_end();) @@ -525,10 +525,10 @@ void frame::print_value_on(outputStream* st, JavaThread *thread) const { } #ifndef PRODUCT - if (_cb != NULL) { + if (_cb != nullptr) { st->print(" "); _cb->print_value_on(st); - if (end == NULL) { + if (end == nullptr) { begin = _cb->code_begin(); end = _cb->code_end(); } @@ -538,7 +538,7 @@ void frame::print_value_on(outputStream* st, JavaThread *thread) const { } void frame::print_on(outputStream* st) const { - print_value_on(st,NULL); + print_value_on(st,nullptr); if (is_interpreted_frame()) { interpreter_frame_print_on(st); } @@ -599,7 +599,7 @@ void frame::print_C_frame(outputStream* st, char* buf, int buflen, address pc) { int offset; bool found; - if (buf == NULL || buflen < 1) return; + if (buf == nullptr || buflen < 1) return; // libname buf[0] = '\0'; found = os::dll_address_to_library_name(pc, buf, buflen, &offset); @@ -608,7 +608,7 @@ void frame::print_C_frame(outputStream* st, char* buf, int buflen, address pc) { const char *p1, *p2; p1 = buf; int len = (int)strlen(os::file_separator()); - while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len; + while ((p2 = strstr(p1, os::file_separator())) != nullptr) p1 = p2 + len; st->print(" [%s+0x%x]", p1, offset); } else { st->print(" " PTR_FORMAT, p2i(pc)); @@ -635,10 +635,10 @@ void frame::print_C_frame(outputStream* st, char* buf, int buflen, address pc) { // suggests the problem is in user lib; everything else is likely a VM bug. 
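// An illustrative aside, not part of the patch: print_C_frame above trims
// the library path down to its basename with the strstr loop before
// printing the frame line. A minimal sketch of the same idiom, assuming a
// Unix "/" separator (the real code uses os::file_separator()):
//
//   const char* p1 = "/usr/lib/jvm/lib/server/libjvm.so";
//   const char* p2;
//   while ((p2 = strstr(p1, "/")) != nullptr) p1 = p2 + 1;
//   // p1 now points at "libjvm.so", printed as, e.g., " [libjvm.so+0x1c2]"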
void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose) const { - if (_cb != NULL) { + if (_cb != nullptr) { if (Interpreter::contains(pc())) { Method* m = this->interpreter_frame_method(); - if (m != NULL) { + if (m != nullptr) { m->name_and_sig_as_C_string(buf, buflen); st->print("j %s", buf); st->print("+%d", this->interpreter_frame_bci()); @@ -646,7 +646,7 @@ void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose if (module->is_named()) { module->name()->as_C_string(buf, buflen); st->print(" %s", buf); - if (module->version() != NULL) { + if (module->version() != nullptr) { module->version()->as_C_string(buf, buflen); st->print("@%s", buf); } @@ -656,7 +656,7 @@ void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose } } else if (StubRoutines::contains(pc())) { StubCodeDesc* desc = StubCodeDesc::desc_for(pc()); - if (desc != NULL) { + if (desc != nullptr) { st->print("v ~StubRoutines::%s " PTR_FORMAT, desc->name(), p2i(pc())); } else { st->print("v ~StubRoutines::" PTR_FORMAT, p2i(pc())); @@ -666,7 +666,7 @@ void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose } else if (_cb->is_compiled()) { CompiledMethod* cm = (CompiledMethod*)_cb; Method* m = cm->method(); - if (m != NULL) { + if (m != nullptr) { if (cm->is_nmethod()) { nmethod* nm = cm->as_nmethod(); st->print("J %d%s", nm->compile_id(), (nm->is_osr_method() ? "%" : "")); @@ -678,7 +678,7 @@ void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose if (module->is_named()) { module->name()->as_C_string(buf, buflen); st->print(" %s", buf); - if (module->version() != NULL) { + if (module->version() != nullptr) { module->version()->as_C_string(buf, buflen); st->print("@%s", buf); } @@ -689,7 +689,7 @@ void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose if (cm->is_nmethod()) { nmethod* nm = cm->as_nmethod(); const char* jvmciName = nm->jvmci_name(); - if (jvmciName != NULL) { + if (jvmciName != nullptr) { st->print(" (%s)", jvmciName); } } @@ -847,7 +847,7 @@ class EntryFrameOopFinder: public SignatureIterator { public: EntryFrameOopFinder(const frame* frame, Symbol* signature, bool is_static) : SignatureIterator(signature) { - _f = NULL; // will be set later + _f = nullptr; // will be set later _fr = frame; _is_static = is_static; _offset = ArgumentSizeComputer(signature).size(); // pre-decremented down to zero @@ -908,7 +908,7 @@ void frame::oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool quer int max_locals = m->is_native() ? 
m->size_of_parameters() : m->max_locals(); - Symbol* signature = NULL; + Symbol* signature = nullptr; bool has_receiver = false; // Process a callee's arguments if we are at a call site @@ -957,10 +957,10 @@ void frame::oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, } void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, DerivedOopClosure* df, DerivedPointerIterationMode derived_mode, const RegisterMap* reg_map) const { - assert(_cb != NULL, "sanity check"); - assert((oop_map() == NULL) == (_cb->oop_maps() == NULL), "frame and _cb must agree that oopmap is set or not"); - if (oop_map() != NULL) { - if (df != NULL) { + assert(_cb != nullptr, "sanity check"); + assert((oop_map() == nullptr) == (_cb->oop_maps() == nullptr), "frame and _cb must agree that oopmap is set or not"); + if (oop_map() != nullptr) { + if (df != nullptr) { _oop_map->oops_do(this, reg_map, f, df); } else { _oop_map->oops_do(this, reg_map, f, derived_mode); @@ -977,7 +977,7 @@ void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, DerivedOopClos // prevent them from being collected. However, this visit should be // restricted to certain phases of the collection only. The // closure decides how it wants nmethods to be traced. - if (cf != NULL) + if (cf != nullptr) cf->do_code_blob(_cb); } @@ -1004,13 +1004,13 @@ class CompiledArgumentOopFinder: public SignatureIterator { VMReg reg = _regs[_offset].first(); oop *loc = _fr.oopmapreg_to_oop_location(reg, _reg_map); #ifdef ASSERT - if (loc == NULL) { + if (loc == nullptr) { if (_reg_map->should_skip_missing()) { return; } tty->print_cr("Error walking frame oops:"); _fr.print_on(tty); - assert(loc != NULL, "missing register map entry reg: " INTPTR_FORMAT " %s loc: " INTPTR_FORMAT, reg->value(), reg->name(), p2i(loc)); + assert(loc != nullptr, "missing register map entry reg: " INTPTR_FORMAT " %s loc: " INTPTR_FORMAT, reg->value(), reg->name(), p2i(loc)); } #endif _f->do_oop(loc); @@ -1066,9 +1066,9 @@ oop frame::retrieve_receiver(RegisterMap* reg_map) { // First consult the ADLC on where it puts parameter 0 for this signature. 
VMReg reg = SharedRuntime::name_for_receiver(); oop* oop_adr = caller.oopmapreg_to_oop_location(reg, reg_map); - if (oop_adr == NULL) { - guarantee(oop_adr != NULL, "bad register save location"); - return NULL; + if (oop_adr == nullptr) { + guarantee(oop_adr != nullptr, "bad register save location"); + return nullptr; } oop r = *oop_adr; assert(Universe::heap()->is_in_or_null(r), "bad receiver: " INTPTR_FORMAT " (" INTX_FORMAT ")", p2i(r), p2i(r)); @@ -1078,7 +1078,7 @@ oop frame::retrieve_receiver(RegisterMap* reg_map) { BasicLock* frame::get_native_monitor() { nmethod* nm = (nmethod*)_cb; - assert(_cb != NULL && _cb->is_nmethod() && nm->method()->is_native(), + assert(_cb != nullptr && _cb->is_nmethod() && nm->method()->is_native(), "Should not call this unless it's a native nmethod"); int byte_offset = in_bytes(nm->native_basic_lock_sp_offset()); assert(byte_offset >= 0, "should not see invalid offset"); @@ -1087,7 +1087,7 @@ BasicLock* frame::get_native_monitor() { oop frame::get_native_receiver() { nmethod* nm = (nmethod*)_cb; - assert(_cb != NULL && _cb->is_nmethod() && nm->method()->is_native(), + assert(_cb != nullptr && _cb->is_nmethod() && nm->method()->is_native(), "Should not call this unless it's a native nmethod"); int byte_offset = in_bytes(nm->native_receiver_sp_offset()); assert(byte_offset >= 0, "should not see invalid offset"); @@ -1097,7 +1097,7 @@ oop frame::get_native_receiver() { } void frame::oops_entry_do(OopClosure* f, const RegisterMap* map) const { - assert(map != NULL, "map must be set"); + assert(map != nullptr, "map must be set"); if (map->include_argument_oops()) { // must collect argument oops, as nobody else is doing it Thread *thread = Thread::current(); @@ -1119,7 +1119,7 @@ bool frame::is_deoptimized_frame() const { * as in return address being patched. 
* It doesn't care if the OP that we return to is a * deopt instruction */ - /*if (_cb != NULL && _cb->is_nmethod()) { + /*if (_cb != nullptr && _cb->is_nmethod()) { return NativeDeoptInstruction::is_deopt_at(_pc); }*/ return false; @@ -1131,7 +1131,7 @@ void frame::oops_do_internal(OopClosure* f, CodeBlobClosure* cf, #ifndef PRODUCT // simulate GC crash here to dump java thread in error report if (CrashGCForDumpingJavaThread) { - char *t = NULL; + char *t = nullptr; *t = 'c'; } #endif @@ -1149,7 +1149,7 @@ void frame::oops_do_internal(OopClosure* f, CodeBlobClosure* cf, } void frame::nmethods_do(CodeBlobClosure* cf) const { - if (_cb != NULL && _cb->is_nmethod()) { + if (_cb != nullptr && _cb->is_nmethod()) { cf->do_code_blob(_cb); } } @@ -1160,7 +1160,7 @@ void frame::metadata_do(MetadataClosure* f) const { ResourceMark rm; if (is_interpreted_frame()) { Method* m = this->interpreter_frame_method(); - assert(m != NULL, "expecting a method in this frame"); + assert(m != nullptr, "expecting a method in this frame"); f->do_metadata(m); } } @@ -1188,7 +1188,7 @@ void frame::verify(const RegisterMap* map) const { #endif if (map->update_map()) { // The map has to be up-to-date for the current frame - oops_do_internal(&VerifyOopClosure::verify_oop, NULL, NULL, DerivedPointerIterationMode::_ignore, map, false); + oops_do_internal(&VerifyOopClosure::verify_oop, nullptr, nullptr, DerivedPointerIterationMode::_ignore, map, false); } } @@ -1299,8 +1299,8 @@ class FrameValuesOopMapClosure: public OopMapClosure { virtual void do_value(VMReg reg, OopMapValue::oop_types type) override { intptr_t* p = (intptr_t*)_fr->oopmapreg_to_location(reg, _reg_map); - if (p != NULL && (((intptr_t)p & WordAlignmentMask) == 0)) { - const char* type_name = NULL; + if (p != nullptr && (((intptr_t)p & WordAlignmentMask) == 0)) { + const char* type_name = nullptr; switch(type) { case OopMapValue::oop_value: type_name = "oop"; break; case OopMapValue::narrowoop_value: type_name = "narrow oop"; break; @@ -1309,7 +1309,7 @@ class FrameValuesOopMapClosure: public OopMapClosure { // case OopMapValue::live_value: type_name = "live"; break; default: break; } - if (type_name != NULL) { + if (type_name != nullptr) { _values.describe(_frame_no, p, err_msg("%s for #%d", type_name, _frame_no)); } } @@ -1344,10 +1344,10 @@ void frame::describe(FrameValues& values, int frame_no, const RegisterMap* reg_m // Label the method and current bci values.describe(-1, info_address, FormatBuffer<1024>("#%d method %s @ %d", frame_no, m->name_and_sig_as_C_string(), bci), 3); - if (desc != NULL) { + if (desc != nullptr) { values.describe(-1, info_address, err_msg("- %s codelet: %s", desc->bytecode() >= 0 ? Bytecodes::name(desc->bytecode()) : "", - desc->description() != NULL ? desc->description() : "?"), 2); + desc->description() != nullptr ? 
desc->description() : "?"), 2); } values.describe(-1, info_address, err_msg("- %d locals %d max stack", m->max_locals(), m->max_stack()), 2); @@ -1373,20 +1373,20 @@ void frame::describe(FrameValues& values, int frame_no, const RegisterMap* reg_m // Compute the actual expression stack size InterpreterOopMap mask; OopMapCache::compute_one_oop_map(methodHandle(Thread::current(), m), bci, &mask); - intptr_t* tos = NULL; + intptr_t* tos = nullptr; // Report each stack element and mark as owned by this frame for (int e = 0; e < mask.expression_stack_size(); e++) { tos = MAX2(tos, interpreter_frame_expression_stack_at(e)); values.describe(frame_no, interpreter_frame_expression_stack_at(e), err_msg("stack %d", e), 1); } - if (tos != NULL) { + if (tos != nullptr) { values.describe(-1, tos, err_msg("expression stack for #%d", frame_no), 2); } - if (reg_map != NULL) { + if (reg_map != nullptr) { FrameValuesOopClosure oopsFn; - oops_do(&oopsFn, NULL, &oopsFn, reg_map); + oops_do(&oopsFn, nullptr, &oopsFn, reg_map); oopsFn.describe(values, frame_no); } } else if (is_entry_frame()) { @@ -1456,29 +1456,29 @@ void frame::describe(FrameValues& values, int frame_no, const RegisterMap* reg_m } } - if (reg_map != NULL && is_java_frame()) { + if (reg_map != nullptr && is_java_frame()) { int scope_no = 0; - for (ScopeDesc* scope = cm->scope_desc_at(pc()); scope != NULL; scope = scope->sender(), scope_no++) { + for (ScopeDesc* scope = cm->scope_desc_at(pc()); scope != nullptr; scope = scope->sender(), scope_no++) { Method* m = scope->method(); int bci = scope->bci(); values.describe(-1, info_address, err_msg("- #%d scope %s @ %d", scope_no, m->name_and_sig_as_C_string(), bci), 2); { // mark locals GrowableArray<ScopeValue*>* scvs = scope->locals(); - int scvs_length = scvs != NULL ? scvs->length() : 0; + int scvs_length = scvs != nullptr ? scvs->length() : 0; for (int i = 0; i < scvs_length; i++) { intptr_t* stack_address = (intptr_t*)StackValue::stack_value_address(this, reg_map, scvs->at(i)); - if (stack_address != NULL) { + if (stack_address != nullptr) { values.describe(frame_no, stack_address, err_msg("local %d for #%d (scope %d)", i, frame_no, scope_no), 1); } } } { // mark expression stack GrowableArray<ScopeValue*>* scvs = scope->expressions(); - int scvs_length = scvs != NULL ? scvs->length() : 0; + int scvs_length = scvs != nullptr ? scvs->length() : 0; for (int i = 0; i < scvs_length; i++) { intptr_t* stack_address = (intptr_t*)StackValue::stack_value_address(this, reg_map, scvs->at(i)); - if (stack_address != NULL) { + if (stack_address != nullptr) { values.describe(frame_no, stack_address, err_msg("stack %d for #%d (scope %d)", i, frame_no, scope_no), 1); } } @@ -1486,10 +1486,10 @@ void frame::describe(FrameValues& values, int frame_no, const RegisterMap* reg_m } FrameValuesOopClosure oopsFn; - oops_do(&oopsFn, NULL, &oopsFn, reg_map); + oops_do(&oopsFn, nullptr, &oopsFn, reg_map); oopsFn.describe(values, frame_no); - if (oop_map() != NULL) { + if (oop_map() != nullptr) { FrameValuesOopMapClosure valuesFn(this, reg_map, values, frame_no); // also OopMapValue::live_value ??
oop_map()->all_type_do(this, OopMapValue::callee_saved_value, &valuesFn); @@ -1509,8 +1509,8 @@ void frame::describe(FrameValues& values, int frame_no, const RegisterMap* reg_m } else { // provide default info if not handled before char *info = (char *) "special frame"; - if ((_cb != NULL) && - (_cb->name() != NULL)) { + if ((_cb != nullptr) && + (_cb->name() != nullptr)) { info = (char *)_cb->name(); } values.describe(-1, info_address, err_msg("#%d <%s>", frame_no, info), 2); @@ -1575,7 +1575,7 @@ void FrameValues::print_on(JavaThread* thread, outputStream* st) { intptr_t* v0 = _values.at(min_index).location; intptr_t* v1 = _values.at(max_index).location; - if (thread != NULL) { + if (thread != nullptr) { if (thread == Thread::current()) { while (!thread->is_in_live_stack((address)v0)) v0 = _values.at(++min_index).location; while (!thread->is_in_live_stack((address)v1)) v1 = _values.at(--max_index).location; @@ -1608,7 +1608,7 @@ void FrameValues::print_on(outputStream* st, int min_index, int max_index, intpt intptr_t* min = MIN2(v0, v1); intptr_t* max = MAX2(v0, v1); intptr_t* cur = max; - intptr_t* last = NULL; + intptr_t* last = nullptr; for (int i = max_index; i >= min_index; i--) { FrameValue fv = _values.at(i); while (cur > fv.location) { diff --git a/src/hotspot/share/runtime/frame.hpp b/src/hotspot/share/runtime/frame.hpp index 7a158c06cd52e..2b62b4e1c730f 100644 --- a/src/hotspot/share/runtime/frame.hpp +++ b/src/hotspot/share/runtime/frame.hpp @@ -147,7 +147,7 @@ class frame { // inline void set_cb(CodeBlob* cb); const ImmutableOopMap* oop_map() const { - if (_oop_map == NULL) { + if (_oop_map == nullptr) { _oop_map = get_oop_map(); } return _oop_map; @@ -159,10 +159,10 @@ class frame { // Every frame needs to return a unique id which distinguishes it from all other frames. // For sparc and ia32 use sp. ia64 can have memory frames that are empty so multiple frames // will have identical sp values. For ia64 the bsp (fp) value will serve. No real frame - // should have an id() of NULL so it is a distinguishing value for an unmatchable frame. + // should have an id() of null so it is a distinguishing value for an unmatchable frame. // We also have relationals which allow comparing a frame to another frame's id(), allowing // us to distinguish younger (more recent activation) from older (less recent activations) - // A NULL id is only valid when comparing for equality. + // A null id is only valid when comparing for equality. intptr_t* id(void) const; bool is_younger(intptr_t* id) const; @@ -175,7 +175,7 @@ class frame { bool equal(frame other) const; // type testers - bool is_empty() const { return _pc == NULL; } + bool is_empty() const { return _pc == nullptr; } bool is_interpreted_frame() const; bool is_java_frame() const; bool is_entry_frame() const; // Java frame called from C?
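// An illustrative aside, not part of the patch: why nullptr is preferred
// over NULL throughout this change. NULL is an integer constant (0 or 0L),
// so it takes part in integer conversions and overload resolution, while
// nullptr has its own type, std::nullptr_t, which converts only to pointer
// types. A minimal sketch:
//
//   void f(int);
//   void f(char*);
//   f(nullptr);  // always calls f(char*)
//   f(NULL);     // may call f(int) or be ambiguous, depending on how NULL is defined
//
// For the comparisons, assignments, and assertions touched here, the
// substitution is intended to be purely mechanical.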
@@ -415,7 +415,7 @@ class frame { void describe_pd(FrameValues& values, int frame_no); public: - void print_value() const { print_value_on(tty,NULL); } + void print_value() const { print_value_on(tty,nullptr); } void print_value_on(outputStream* st, JavaThread *thread) const; void print_on(outputStream* st) const; void interpreter_frame_print_on(outputStream* st) const; @@ -423,7 +423,7 @@ class frame { static void print_C_frame(outputStream* st, char* buf, int buflen, address pc); // Add annotated descriptions of memory locations belonging to this frame to values - void describe(FrameValues& values, int frame_no, const RegisterMap* reg_map=NULL); + void describe(FrameValues& values, int frame_no, const RegisterMap* reg_map=nullptr); // Conversion from a VMReg to physical stack location template <typename RegisterMapT> @@ -457,7 +457,7 @@ class frame { #else DerivedPointerIterationMode dpim = DerivedPointerIterationMode::_ignore;; #endif - oops_do_internal(f, cf, NULL, dpim, map, true); + oops_do_internal(f, cf, nullptr, dpim, map, true); } void oops_do(OopClosure* f, CodeBlobClosure* cf, DerivedOopClosure* df, const RegisterMap* map) { @@ -466,7 +466,7 @@ class frame { void oops_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map, DerivedPointerIterationMode derived_mode) const { - oops_do_internal(f, cf, NULL, derived_mode, map, true); + oops_do_internal(f, cf, nullptr, derived_mode, map, true); } void nmethods_do(CodeBlobClosure* cf) const; @@ -494,8 +494,8 @@ class FrameValue { int priority; FrameValue() { - location = NULL; - description = NULL; + location = nullptr; + description = nullptr; owner = -1; priority = 0; } diff --git a/src/hotspot/share/runtime/frame.inline.hpp b/src/hotspot/share/runtime/frame.inline.hpp index 8fb0e6ccf564a..b6116a0341d7c 100644 --- a/src/hotspot/share/runtime/frame.inline.hpp +++ b/src/hotspot/share/runtime/frame.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -49,7 +49,7 @@ inline bool frame::is_entry_frame() const { } inline bool frame::is_stub_frame() const { - return StubRoutines::is_stub_code(pc()) || (_cb != NULL && _cb->is_adapter_blob()); + return StubRoutines::is_stub_code(pc()) || (_cb != nullptr && _cb->is_adapter_blob()); } inline bool frame::is_first_frame() const { @@ -59,11 +59,11 @@ inline bool frame::is_first_frame() const { } inline bool frame::is_upcall_stub_frame() const { - return _cb != NULL && _cb->is_upcall_stub(); + return _cb != nullptr && _cb->is_upcall_stub(); } inline bool frame::is_compiled_frame() const { - if (_cb != NULL && + if (_cb != nullptr && _cb->is_compiled() && ((CompiledMethod*)_cb)->is_java_method()) { return true; @@ -82,7 +82,7 @@ inline address frame::oopmapreg_to_location(VMReg reg, const RegisterMapT* reg_m return (address)((intptr_t)reg_map->as_RegisterMap()->stack_chunk()->relativize_usp_offset(*this, sp_offset_in_bytes)); } address usp = (address)unextended_sp(); - assert(reg_map->thread() == NULL || reg_map->thread()->is_in_usable_stack(usp), INTPTR_FORMAT, p2i(usp)); + assert(reg_map->thread() == nullptr || reg_map->thread()->is_in_usable_stack(usp), INTPTR_FORMAT, p2i(usp)); return (usp + sp_offset_in_bytes); } } @@ -93,11 +93,11 @@ inline oop* frame::oopmapreg_to_oop_location(VMReg reg, const RegisterMapT* reg_ } inline CodeBlob* frame::get_cb() const { - // if (_cb == NULL) _cb = CodeCache::find_blob(_pc); - if (_cb == NULL) { + // if (_cb == nullptr) _cb = CodeCache::find_blob(_pc); + if (_cb == nullptr) { int slot; _cb = CodeCache::find_blob_and_oopmap(_pc, slot); - if (_oop_map == NULL && slot >= 0) { + if (_oop_map == nullptr && slot >= 0) { _oop_map = _cb->oop_map_for_slot(slot, _pc); } } diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp index 10aa5ec226dc2..f018054abf567 100644 --- a/src/hotspot/share/runtime/globals.hpp +++ b/src/hotspot/share/runtime/globals.hpp @@ -544,7 +544,7 @@ const int ObjectAlignmentInBytes = 8; "Dump heap to file when java.lang.OutOfMemoryError is thrown " \ "from JVM") \ \ - product(ccstr, HeapDumpPath, NULL, MANAGEABLE, \ + product(ccstr, HeapDumpPath, nullptr, MANAGEABLE, \ "When HeapDumpOnOutOfMemoryError is on, the path (filename or " \ "directory) of the dump file (defaults to java_pid<pid>.hprof " \ "in the working directory)") \ @@ -598,7 +598,7 @@ const int ObjectAlignmentInBytes = 8; product(bool, PrintAssembly, false, DIAGNOSTIC, \ "Print assembly code (using external disassembler.so)") \ \ - product(ccstr, PrintAssemblyOptions, NULL, DIAGNOSTIC, \ + product(ccstr, PrintAssemblyOptions, nullptr, DIAGNOSTIC, \ "Print options string passed to disassembler.so") \ \ notproduct(bool, PrintNMethodStatistics, false, \ @@ -626,7 +626,7 @@ const int ObjectAlignmentInBytes = 8; "Exercise compiled exception handlers") \ \ develop(bool, InterceptOSException, false, \ - "Start debugger when an implicit OS (e.g. NULL) " \ + "Start debugger when an implicit OS (e.g.
nullptr) " \ "exception happens") \ \ product(bool, PrintCodeCache, false, \ @@ -832,7 +832,7 @@ const int ObjectAlignmentInBytes = 8; develop(bool, StressRewriter, false, \ "Stress linktime bytecode rewriting") \ \ - product(ccstr, TraceJVMTI, NULL, \ + product(ccstr, TraceJVMTI, nullptr, \ "Trace flags for JVMTI functions and events") \ \ product(bool, StressLdcRewrite, false, DIAGNOSTIC, \ @@ -1032,11 +1032,11 @@ const int ObjectAlignmentInBytes = 8; product(bool, LogVMOutput, false, DIAGNOSTIC, \ "Save VM output to LogFile") \ \ - product(ccstr, LogFile, NULL, DIAGNOSTIC, \ + product(ccstr, LogFile, nullptr, DIAGNOSTIC, \ "If LogVMOutput or LogCompilation is on, save VM output to " \ "this file [default: ./hotspot_pid%p.log] (%p replaced with pid)")\ \ - product(ccstr, ErrorFile, NULL, \ + product(ccstr, ErrorFile, nullptr, \ "If an error occurs, save the error data to this file " \ "[default: ./hs_err_pid%p.log] (%p replaced with pid)") \ \ @@ -1073,11 +1073,11 @@ const int ObjectAlignmentInBytes = 8; notproduct(bool, PrintSymbolTableSizeHistogram, false, \ "print histogram of the symbol table") \ \ - product(ccstr, AbortVMOnException, NULL, DIAGNOSTIC, \ + product(ccstr, AbortVMOnException, nullptr, DIAGNOSTIC, \ "Call fatal if this exception is thrown. Example: " \ "java -XX:AbortVMOnException=java.lang.NullPointerException Foo") \ \ - product(ccstr, AbortVMOnExceptionMessage, NULL, DIAGNOSTIC, \ + product(ccstr, AbortVMOnExceptionMessage, nullptr, DIAGNOSTIC, \ "Call fatal if the exception pointed by AbortVMOnException " \ "has this message") \ \ @@ -1741,7 +1741,7 @@ const int ObjectAlignmentInBytes = 8; product(bool, PerfDataSaveToFile, false, \ "Save PerfData memory to hsperfdata_<pid> file on exit") \ \ - product(ccstr, PerfDataSaveFile, NULL, \ + product(ccstr, PerfDataSaveFile, nullptr, \ "Save PerfData memory to the specified absolute pathname. " \ "The string %p in the file name (if present) " \ "will be replaced by pid") \ @@ -1821,7 +1821,7 @@ const int ObjectAlignmentInBytes = 8; "Causes the VM to pause at startup time and wait for the pause " \ "file to be removed (default: ./vm.paused.<pid>)") \ \ - product(ccstr, PauseAtStartupFile, NULL, DIAGNOSTIC, \ + product(ccstr, PauseAtStartupFile, nullptr, DIAGNOSTIC, \ "The file to create and for whose removal to await when pausing " \ "at startup. (default: ./vm.paused.<pid>)") \ \ @@ -1928,7 +1928,7 @@ const int ObjectAlignmentInBytes = 8; range(0, max_intx) \ constraint(InitArrayShortSizeConstraintFunc, AfterErgo) \ \ - product(ccstr, AllocateHeapAt, NULL, \ + product(ccstr, AllocateHeapAt, nullptr, \ "Path to the directory where a temporary file will be created " \ "to use as the backing store for Java Heap.") \ \ @@ -1963,10 +1963,10 @@ const int ObjectAlignmentInBytes = 8; JFR_ONLY(product(bool, FlightRecorder, false, \ "(Deprecated) Enable Flight Recorder")) \ \ - JFR_ONLY(product(ccstr, FlightRecorderOptions, NULL, \ + JFR_ONLY(product(ccstr, FlightRecorderOptions, nullptr, \ "Flight Recorder options")) \ \ - JFR_ONLY(product(ccstr, StartFlightRecording, NULL, \ + JFR_ONLY(product(ccstr, StartFlightRecording, nullptr, \ "Start flight recording with options")) \ \ product(bool, UseFastUnorderedTimeStamps, false, EXPERIMENTAL, \ diff --git a/src/hotspot/share/runtime/handles.cpp b/src/hotspot/share/runtime/handles.cpp index c8e7005221002..bef4736e443fb 100644 --- a/src/hotspot/share/runtime/handles.cpp +++ b/src/hotspot/share/runtime/handles.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates.
All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,7 +44,7 @@ oop* HandleArea::allocate_handle(oop obj) { oop* HandleArea::allocate_null_handle() { assert_handle_mark_nesting(); - return real_allocate_handle(NULL); + return real_allocate_handle(nullptr); } #endif @@ -53,9 +53,9 @@ oop* HandleArea::allocate_null_handle() { #define DEF_METADATA_HANDLE_FN_NOINLINE(name, type) \ name##Handle::name##Handle(const name##Handle &h) { \ _value = h._value; \ - if (_value != NULL) { \ + if (_value != nullptr) { \ assert(_value->is_valid(), "obj is valid"); \ - if (h._thread != NULL) { \ + if (h._thread != nullptr) { \ assert(h._thread == Thread::current(), "thread must be current");\ _thread = h._thread; \ } else { \ @@ -64,15 +64,15 @@ name##Handle::name##Handle(const name##Handle &h) { \ assert(_thread->is_in_live_stack((address)this), "not on stack?"); \ _thread->metadata_handles()->push((Metadata*)_value); \ } else { \ - _thread = NULL; \ + _thread = nullptr; \ } \ } \ name##Handle& name##Handle::operator=(const name##Handle &s) { \ remove(); \ _value = s._value; \ - if (_value != NULL) { \ + if (_value != nullptr) { \ assert(_value->is_valid(), "obj is valid"); \ - if (s._thread != NULL) { \ + if (s._thread != nullptr) { \ assert(s._thread == Thread::current(), "thread must be current");\ _thread = s._thread; \ } else { \ @@ -81,12 +81,12 @@ name##Handle& name##Handle::operator=(const name##Handle &s) { \ assert(_thread->is_in_live_stack((address)this), "not on stack?"); \ _thread->metadata_handles()->push((Metadata*)_value); \ } else { \ - _thread = NULL; \ + _thread = nullptr; \ } \ return *this; \ } \ inline void name##Handle::remove() { \ - if (_value != NULL) { \ + if (_value != nullptr) { \ int i = _thread->metadata_handles()->find_from_end((Metadata*)_value); \ assert(i!=-1, "not in metadata_handles list"); \ _thread->metadata_handles()->remove_at(i); \ @@ -122,7 +122,7 @@ void HandleArea::oops_do(OopClosure* f) { k = k->next(); } - if (_prev != NULL) _prev->oops_do(f); + if (_prev != nullptr) _prev->oops_do(f); } void HandleMark::initialize(Thread* thread) { diff --git a/src/hotspot/share/runtime/handles.hpp b/src/hotspot/share/runtime/handles.hpp index fa3f5cca589e5..5cab1a25340f6 100644 --- a/src/hotspot/share/runtime/handles.hpp +++ b/src/hotspot/share/runtime/handles.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -67,12 +67,12 @@ class Handle { oop* _handle; protected: - oop obj() const { return _handle == NULL ? (oop)NULL : *_handle; } - oop non_null_obj() const { assert(_handle != NULL, "resolving NULL handle"); return *_handle; } + oop obj() const { return _handle == nullptr ? 
(oop)nullptr : *_handle; } + oop non_null_obj() const { assert(_handle != nullptr, "resolving nullptr handle"); return *_handle; } public: // Constructors - Handle() { _handle = NULL; } + Handle() { _handle = nullptr; } inline Handle(Thread* thread, oop obj); // General access @@ -85,8 +85,8 @@ class Handle { bool operator != (const Handle& h) const { return obj() != h.obj(); } // Null checks - bool is_null() const { return _handle == NULL; } - bool not_null() const { return _handle != NULL; } + bool is_null() const { return _handle == nullptr; } + bool not_null() const { return _handle != nullptr; } // Debugging void print() { obj()->print(); } @@ -99,7 +99,7 @@ class Handle { // Raw handle access. Allows easy duplication of Handles. This can be very unsafe // since duplicates is only valid as long as original handle is alive. oop* raw_value() const { return _handle; } - static oop raw_resolve(oop *handle) { return handle == NULL ? (oop)NULL : *handle; } + static oop raw_resolve(oop *handle) { return handle == nullptr ? (oop)nullptr : *handle; } inline void replace(oop obj); }; @@ -144,11 +144,11 @@ DEF_HANDLE(typeArray , is_typeArray_noinline ) Thread* _thread; \ protected: \ type* obj() const { return _value; } \ - type* non_null_obj() const { assert(_value != NULL, "resolving NULL _value"); return _value; } \ + type* non_null_obj() const { assert(_value != nullptr, "resolving nullptr _value"); return _value; } \ \ public: \ /* Constructors */ \ - name##Handle () : _value(NULL), _thread(NULL) {} \ + name##Handle () : _value(nullptr), _thread(nullptr) {} \ name##Handle (Thread* thread, type* obj); \ \ name##Handle (const name##Handle &h); \ @@ -166,8 +166,8 @@ DEF_HANDLE(typeArray , is_typeArray_noinline ) bool operator == (const name##Handle& h) const { return obj() == h.obj(); } \ \ /* Null checks */ \ - bool is_null() const { return _value == NULL; } \ - bool not_null() const { return _value != NULL; } \ + bool is_null() const { return _value == nullptr; } \ + bool not_null() const { return _value != nullptr; } \ }; diff --git a/src/hotspot/share/runtime/handles.inline.hpp b/src/hotspot/share/runtime/handles.inline.hpp index 8e9f09231fe83..669a940eca961 100644 --- a/src/hotspot/share/runtime/handles.inline.hpp +++ b/src/hotspot/share/runtime/handles.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,8 +36,8 @@ inline Handle::Handle(Thread* thread, oop obj) { assert(thread == Thread::current(), "sanity check"); - if (obj == NULL) { - _handle = NULL; + if (obj == nullptr) { + _handle = nullptr; } else { _handle = thread->handle_area()->allocate_handle(obj); } @@ -47,7 +47,7 @@ inline void Handle::replace(oop obj) { // Unlike in OopHandle::replace, we shouldn't use a barrier here. // OopHandle has its storage in OopStorage, which is walked concurrently and uses barriers. // Handle is thread private, and iterated by Thread::oops_do, which is why it shouldn't have any barriers at all. 
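// An illustrative aside, not part of the patch: the typical usage pattern
// behind the Handle constructor and null checks above, assuming code that
// runs on the current JavaThread (THREAD is the usual HotSpot alias for it):
//
//   HandleMark hm(THREAD);   // releases handles allocated below at scope exit
//   Handle h(THREAD, obj);   // stores obj in the thread's HandleArea
//   if (h.not_null()) {      // i.e. _handle != nullptr
//     oop o = h();           // resolve to the current oop; the Handle, not
//   }                        // the raw oop, is what a GC keeps up to date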
- assert(_handle != NULL, "should not use replace"); + assert(_handle != nullptr, "should not use replace"); *_handle = obj; } @@ -65,7 +65,7 @@ DEF_HANDLE_CONSTR(typeArray, is_typeArray_noinline) // Constructor for metadata handles #define DEF_METADATA_HANDLE_FN(name, type) \ inline name##Handle::name##Handle(Thread* thread, type* obj) : _value(obj), _thread(thread) { \ - if (obj != NULL) { \ + if (obj != nullptr) { \ assert(((Metadata*)obj)->is_valid(), "obj is valid"); \ assert(_thread == Thread::current(), "thread must be current"); \ assert(_thread->is_in_live_stack((address)this), "not on stack?"); \ @@ -85,7 +85,7 @@ inline void HandleMark::push() { inline void HandleMark::pop_and_restore() { // Delete later chunks - if(_chunk->next() != NULL) { + if(_chunk->next() != nullptr) { assert(_area->size_in_bytes() > size_in_bytes(), "Sanity check"); chop_later_chunks(); } else { diff --git a/src/hotspot/share/runtime/handshake.cpp b/src/hotspot/share/runtime/handshake.cpp index a04b692c2b366..9afe25ac58cb7 100644 --- a/src/hotspot/share/runtime/handshake.cpp +++ b/src/hotspot/share/runtime/handshake.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -90,7 +90,7 @@ class AsyncHandshakeOperation : public HandshakeOperation { jlong _start_time_ns; public: AsyncHandshakeOperation(AsyncHandshakeClosure* cl, JavaThread* target, jlong start_ns) - : HandshakeOperation(cl, target, NULL), _start_time_ns(start_ns) {} + : HandshakeOperation(cl, target, nullptr), _start_time_ns(start_ns) {} virtual ~AsyncHandshakeOperation() { delete _handshake_cl; } jlong start_time() const { return _start_time_ns; } }; @@ -188,7 +188,7 @@ static void handle_timeout(HandshakeOperation* op, JavaThread* target) { log_error(handshake)("Handshake timeout: %s(" INTPTR_FORMAT "), pending threads: " INT32_FORMAT, op->name(), p2i(op), op->pending_threads()); - if (target == NULL) { + if (target == nullptr) { for ( ; JavaThread* thr = jtiwh.next(); ) { if (thr->handshake_state()->operation_pending(op)) { log_error(handshake)("JavaThread " INTPTR_FORMAT " has not cleared handshake op: " INTPTR_FORMAT, p2i(thr), p2i(op)); @@ -200,7 +200,7 @@ static void handle_timeout(HandshakeOperation* op, JavaThread* target) { log_error(handshake)("JavaThread " INTPTR_FORMAT " has not cleared handshake op: " INTPTR_FORMAT, p2i(target), p2i(op)); } - if (target != NULL) { + if (target != nullptr) { if (os::signal_thread(target, SIGILL, "cannot be handshaked")) { // Give target a chance to report the error and terminate the VM. 
os::naked_sleep(3000); @@ -211,7 +211,7 @@ static void handle_timeout(HandshakeOperation* op, JavaThread* target) { fatal("Handshake timeout"); } -static void check_handshake_timeout(jlong start_time, HandshakeOperation* op, JavaThread* target = NULL) { +static void check_handshake_timeout(jlong start_time, HandshakeOperation* op, JavaThread* target = nullptr) { // Check if handshake operation has timed out jlong timeout_ns = millis_to_nanos(HandshakeTimeout); if (timeout_ns > 0) { @@ -221,15 +221,15 @@ static void check_handshake_timeout(jlong start_time, HandshakeOperation* op, Ja } } -static void log_handshake_info(jlong start_time_ns, const char* name, int targets, int emitted_handshakes_executed, const char* extra = NULL) { +static void log_handshake_info(jlong start_time_ns, const char* name, int targets, int emitted_handshakes_executed, const char* extra = nullptr) { if (log_is_enabled(Info, handshake)) { jlong completion_time = os::javaTimeNanos() - start_time_ns; log_info(handshake)("Handshake \"%s\", Targeted threads: %d, Executed by requesting thread: %d, Total completion time: " JLONG_FORMAT " ns%s%s", name, targets, emitted_handshakes_executed, completion_time, - extra != NULL ? ", " : "", - extra != NULL ? extra : ""); + extra != nullptr ? ", " : "", + extra != nullptr ? extra : ""); } } @@ -245,7 +245,7 @@ class VM_HandshakeAllThreads: public VM_Operation { JavaThreadIteratorWithHandle jtiwh; int number_of_threads_issued = 0; - for (JavaThread* thr = jtiwh.next(); thr != NULL; thr = jtiwh.next()) { + for (JavaThread* thr = jtiwh.next(); thr != nullptr; thr = jtiwh.next()) { thr->handshake_state()->add_operation(_op); number_of_threads_issued++; } @@ -273,7 +273,7 @@ class VM_HandshakeAllThreads: public VM_Operation { // Observing a blocked state may of course be transient but the processing is guarded // by mutexes and we optimistically begin by working on the blocked threads jtiwh.rewind(); - for (JavaThread* thr = jtiwh.next(); thr != NULL; thr = jtiwh.next()) { + for (JavaThread* thr = jtiwh.next(); thr != nullptr; thr = jtiwh.next()) { // A new thread on the ThreadsList will not have an operation, // hence it is skipped in handshake_try_process. HandshakeState::ProcessResult pr = thr->handshake_state()->try_process(_op); @@ -306,7 +306,7 @@ void HandshakeOperation::prepare(JavaThread* current_target, Thread* executing_t // Only when the target is not executing the handshake itself. StackWatermarkSet::start_processing(current_target, StackWatermarkKind::gc); } - if (_requester != NULL && _requester != executing_thread && _requester->is_Java_thread()) { + if (_requester != nullptr && _requester != executing_thread && _requester->is_Java_thread()) { // The handshake closure may contain oop Handles from the _requester. // We must make sure we can use them. 
StackWatermarkSet::start_processing(JavaThread::cast(_requester), StackWatermarkKind::gc); @@ -343,7 +343,7 @@ void HandshakeOperation::do_handshake(JavaThread* thread) { } void Handshake::execute(HandshakeClosure* hs_cl) { - HandshakeOperation cto(hs_cl, NULL, Thread::current()); + HandshakeOperation cto(hs_cl, nullptr, Thread::current()); VM_HandshakeAllThreads handshake(&cto); VMThread::execute(&handshake); } @@ -496,13 +496,13 @@ HandshakeOperation* HandshakeState::get_op_for_self(bool allow_suspend, bool che bool HandshakeState::has_operation(bool allow_suspend, bool check_async_exception) { MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag); - return get_op_for_self(allow_suspend, check_async_exception) != NULL; + return get_op_for_self(allow_suspend, check_async_exception) != nullptr; } bool HandshakeState::has_async_exception_operation() { if (!has_operation()) return false; - MutexLocker ml(_lock.owned_by_self() ? NULL : &_lock, Mutex::_no_safepoint_check_flag); - return _queue.peek(async_exception_filter) != NULL; + MutexLocker ml(_lock.owned_by_self() ? nullptr : &_lock, Mutex::_no_safepoint_check_flag); + return _queue.peek(async_exception_filter) != nullptr; } void HandshakeState::clean_async_exception_operation() { @@ -548,8 +548,8 @@ bool HandshakeState::process_by_self(bool allow_suspend, bool check_async_except MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag); HandshakeOperation* op = get_op_for_self(allow_suspend, check_async_exception); - if (op != NULL) { - assert(op->_target == NULL || op->_target == Thread::current(), "Wrong thread"); + if (op != nullptr) { + assert(op->_target == nullptr || op->_target == Thread::current(), "Wrong thread"); bool async = op->is_async(); log_trace(handshake)("Proc handshake %s " INTPTR_FORMAT " on " INTPTR_FORMAT " by self", async ? "asynchronous" : "synchronous", p2i(op), p2i(_handshakee)); @@ -646,9 +646,9 @@ HandshakeState::ProcessResult HandshakeState::try_process(HandshakeOperation* ma HandshakeOperation* op = get_op(); - assert(op != NULL, "Must have an op"); + assert(op != nullptr, "Must have an op"); assert(SafepointMechanism::local_poll_armed(_handshakee), "Must be"); - assert(op->_target == NULL || _handshakee == op->_target, "Wrong thread"); + assert(op->_target == nullptr || _handshakee == op->_target, "Wrong thread"); log_trace(handshake)("Processing handshake " INTPTR_FORMAT " by %s(%s)", p2i(op), op == match_op ? "handshaker" : "cooperative", @@ -658,7 +658,7 @@ HandshakeState::ProcessResult HandshakeState::try_process(HandshakeOperation* ma set_active_handshaker(current_thread); op->do_handshake(_handshakee); // acquire, op removed after - set_active_handshaker(NULL); + set_active_handshaker(nullptr); remove_op(op); _lock.unlock(); @@ -703,7 +703,7 @@ class ThreadSelfSuspensionHandshake : public AsyncHandshakeClosure { }; bool HandshakeState::suspend_with_handshake() { - assert(_handshakee->threadObj() != NULL, "cannot suspend with a NULL threadObj"); + assert(_handshakee->threadObj() != nullptr, "cannot suspend with a null threadObj"); if (_handshakee->is_exiting()) { log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " exiting", p2i(_handshakee)); return false; diff --git a/src/hotspot/share/runtime/icache.cpp b/src/hotspot/share/runtime/icache.cpp index 999c03f6fa999..bce33201073bc 100644 --- a/src/hotspot/share/runtime/icache.cpp +++ b/src/hotspot/share/runtime/icache.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,14 +29,14 @@ #include "utilities/align.hpp" // The flush stub function address -AbstractICache::flush_icache_stub_t AbstractICache::_flush_icache_stub = NULL; +AbstractICache::flush_icache_stub_t AbstractICache::_flush_icache_stub = nullptr; void AbstractICache::initialize() { // Making this stub must be FIRST use of assembler ResourceMark rm; BufferBlob* b = BufferBlob::create("flush_icache_stub", ICache::stub_size); - if (b == NULL) { + if (b == nullptr) { vm_exit_out_of_memory(ICache::stub_size, OOM_MALLOC_ERROR, "CodeCache: no space for flush_icache_stub"); } CodeBuffer c(b); diff --git a/src/hotspot/share/runtime/interfaceSupport.cpp b/src/hotspot/share/runtime/interfaceSupport.cpp index 951e0e0835ad7..a58c6b0236ee7 100644 --- a/src/hotspot/share/runtime/interfaceSupport.cpp +++ b/src/hotspot/share/runtime/interfaceSupport.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -86,7 +86,7 @@ void InterfaceSupport::gc_alot() { if (!thread->is_Java_thread()) return; // Avoid concurrent calls // Check for new, not quite initialized thread. A thread in new mode cannot initiate a GC. JavaThread *current_thread = JavaThread::cast(thread); - if (current_thread->active_handles() == NULL) return; + if (current_thread->active_handles() == nullptr) return; // Short-circuit any possible re-entrant gc-a-lot attempt if (thread->skip_gcalot()) return; @@ -220,7 +220,7 @@ void InterfaceSupport::verify_stack() { // In case of exceptions we might not have a runtime_stub on // top of stack, hence, all callee-saved registers are not going // to be setup correctly, hence, we cannot do stack verify - if (cb != NULL && !(cb->is_runtime_stub() || cb->is_uncommon_trap_stub())) return; + if (cb != nullptr && !(cb->is_runtime_stub() || cb->is_uncommon_trap_stub())) return; for (; !sfs.is_done(); sfs.next()) { sfs.current()->verify(sfs.register_map()); diff --git a/src/hotspot/share/runtime/interfaceSupport.inline.hpp b/src/hotspot/share/runtime/interfaceSupport.inline.hpp index 063d7385b6099..dd43537c4c241 100644 --- a/src/hotspot/share/runtime/interfaceSupport.inline.hpp +++ b/src/hotspot/share/runtime/interfaceSupport.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -80,7 +80,7 @@ class ThreadStateTransition : public StackObj { public: ThreadStateTransition(JavaThread *thread) : _thread(thread) { - assert(thread != NULL, "must be active Java thread"); + assert(thread != nullptr, "must be active Java thread"); assert(thread == Thread::current(), "must be current thread"); } @@ -145,7 +145,7 @@ class ThreadInVMfromJava : public ThreadStateTransition { class ThreadInVMfromUnknown { JavaThread* _thread; public: - ThreadInVMfromUnknown() : _thread(NULL) { + ThreadInVMfromUnknown() : _thread(nullptr) { Thread* t = Thread::current(); if (t->is_Java_thread()) { JavaThread* t2 = JavaThread::cast(t); diff --git a/src/hotspot/share/runtime/java.cpp b/src/hotspot/share/runtime/java.cpp index 74078cc43b5f6..ba5e0c5dcc63b 100644 --- a/src/hotspot/share/runtime/java.cpp +++ b/src/hotspot/share/runtime/java.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -107,7 +107,7 @@ int compare_methods(Method** a, Method** b) { void collect_profiled_methods(Method* m) { Thread* thread = Thread::current(); methodHandle mh(thread, m); - if ((m->method_data() != NULL) && + if ((m->method_data() != nullptr) && (PrintMethodData || CompilerOracle::should_print(mh))) { collected_profiled_methods->push(m); } @@ -132,7 +132,7 @@ void print_method_profiling_data() { tty->print_cr(" mdo size: %d bytes", m->method_data()->size_in_bytes()); tty->cr(); // Dump data on parameters if any - if (m->method_data() != NULL && m->method_data()->parameters_type_data() != NULL) { + if (m->method_data() != nullptr && m->method_data()->parameters_type_data() != nullptr) { tty->fill_to(2); m->method_data()->parameters_type_data()->print_data_on(tty); } @@ -295,7 +295,7 @@ void print_statistics() { // CodeHeap State Analytics. if (PrintCodeHeapAnalytics) { - CompileBroker::print_heapinfo(NULL, "all", 4096); // details + CompileBroker::print_heapinfo(nullptr, "all", 4096); // details } if (PrintCodeCache2) { @@ -360,7 +360,7 @@ void print_statistics() { // CodeHeap State Analytics. if (PrintCodeHeapAnalytics) { - CompileBroker::print_heapinfo(NULL, "all", 4096); // details + CompileBroker::print_heapinfo(nullptr, "all", 4096); // details } #ifdef COMPILER2 @@ -491,7 +491,7 @@ void before_exit(JavaThread* thread, bool halt) { #if INCLUDE_CDS if (DynamicArchive::should_dump_at_vm_exit()) { - assert(ArchiveClassesAtExit != NULL, "Must be already set"); + assert(ArchiveClassesAtExit != nullptr, "Must be already set"); ExceptionMark em(thread); DynamicArchive::dump(ArchiveClassesAtExit, thread); if (thread->has_pending_exception()) { @@ -527,8 +527,8 @@ void before_exit(JavaThread* thread, bool halt) { void vm_exit(int code) { Thread* thread = - ThreadLocalStorage::is_initialized() ? Thread::current_or_null() : NULL; - if (thread == NULL) { + ThreadLocalStorage::is_initialized() ? Thread::current_or_null() : nullptr; + if (thread == nullptr) { // very early initialization failure -- just exit vm_direct_exit(code); } @@ -538,7 +538,7 @@ void vm_exit(int code) { // XML termination logging safe is tied to the termination of the // VMThread, and it doesn't terminate on this exit path. See 8222534. 
- if (VMThread::vm_thread() != NULL) { + if (VMThread::vm_thread() != nullptr) { if (thread->is_Java_thread()) { // We must be "in_vm" for the code below to work correctly. // Historically there must have been some exit path for which @@ -589,7 +589,7 @@ void vm_direct_exit(int code, const char* message) { void vm_perform_shutdown_actions() { if (is_init_completed()) { Thread* thread = Thread::current_or_null(); - if (thread != NULL && thread->is_Java_thread()) { + if (thread != nullptr && thread->is_Java_thread()) { // We are leaving the VM, set state to native (in case any OS exit // handlers call back to the VM) JavaThread* jt = JavaThread::cast(thread); @@ -622,10 +622,10 @@ void vm_abort(bool dump_core) { } void vm_notify_during_cds_dumping(const char* error, const char* message) { - if (error != NULL) { + if (error != nullptr) { tty->print_cr("Error occurred during CDS dumping"); tty->print("%s", error); - if (message != NULL) { + if (message != nullptr) { tty->print_cr(": %s", message); } else { @@ -642,10 +642,10 @@ void vm_exit_during_cds_dumping(const char* error, const char* message) { } void vm_notify_during_shutdown(const char* error, const char* message) { - if (error != NULL) { + if (error != nullptr) { tty->print_cr("Error occurred during initialization of VM"); tty->print("%s", error); - if (message != NULL) { + if (message != nullptr) { tty->print_cr(": %s", message); } else { @@ -658,7 +658,7 @@ void vm_notify_during_shutdown(const char* error, const char* message) { } void vm_exit_during_initialization() { - vm_notify_during_shutdown(NULL, NULL); + vm_notify_during_shutdown(nullptr, nullptr); // Failure during initialization, we don't want to dump core vm_abort(false); @@ -669,13 +669,13 @@ void vm_exit_during_initialization(Handle exception) { // If there are exceptions on this thread it must be cleared // first and here. Any future calls to EXCEPTION_MARK requires // that no pending exceptions exist. - JavaThread* THREAD = JavaThread::current(); // can't be NULL + JavaThread* THREAD = JavaThread::current(); // can't be null if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; } java_lang_Throwable::print_stack_trace(exception, tty); tty->cr(); - vm_notify_during_shutdown(NULL, NULL); + vm_notify_during_shutdown(nullptr, nullptr); // Failure during initialization, we don't want to dump core vm_abort(false); diff --git a/src/hotspot/share/runtime/java.hpp b/src/hotspot/share/runtime/java.hpp index 93978411bfcc0..450be40fc82c8 100644 --- a/src/hotspot/share/runtime/java.hpp +++ b/src/hotspot/share/runtime/java.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -54,10 +54,10 @@ extern void notify_vm_shutdown(); extern void vm_exit_during_initialization(); extern void vm_exit_during_initialization(Handle exception); extern void vm_exit_during_initialization(Symbol* exception_name, const char* message); -extern void vm_exit_during_initialization(const char* error, const char* message = NULL); -extern void vm_shutdown_during_initialization(const char* error, const char* message = NULL); +extern void vm_exit_during_initialization(const char* error, const char* message = nullptr); +extern void vm_shutdown_during_initialization(const char* error, const char* message = nullptr); -extern void vm_exit_during_cds_dumping(const char* error, const char* message = NULL); +extern void vm_exit_during_cds_dumping(const char* error, const char* message = nullptr); /** * With the integration of the changes to handle the version string diff --git a/src/hotspot/share/runtime/javaCalls.cpp b/src/hotspot/share/runtime/javaCalls.cpp index 856b5aa337487..7245a36dc74bc 100644 --- a/src/hotspot/share/runtime/javaCalls.cpp +++ b/src/hotspot/share/runtime/javaCalls.cpp @@ -85,7 +85,7 @@ JavaCallWrapper::JavaCallWrapper(const methodHandle& callee_method, Handle recei _handles = _thread->active_handles(); // save previous handle block & Java frame linkage // For the profiler, the last_Java_frame information in thread must always be in - // legal state. We have no last Java frame if last_Java_sp == NULL so + // legal state. We have no last Java frame if last_Java_sp == nullptr so // the valid transition is to clear _last_Java_sp and then reset the rest of // the (platform specific) state. @@ -174,7 +174,7 @@ static BasicType runtime_type_from(JavaValue* result) { void JavaCalls::call_virtual(JavaValue* result, Klass* spec_klass, Symbol* name, Symbol* signature, JavaCallArguments* args, TRAPS) { CallInfo callinfo; Handle receiver = args->receiver(); - Klass* recvrKlass = receiver.is_null() ? (Klass*)NULL : receiver->klass(); + Klass* recvrKlass = receiver.is_null() ? (Klass*)nullptr : receiver->klass(); LinkInfo link_info(spec_klass, name, signature); LinkResolver::resolve_virtual_call( callinfo, receiver, recvrKlass, link_info, true, CHECK); @@ -405,8 +405,8 @@ void JavaCalls::call_helper(JavaValue* result, const methodHandle& method, JavaC // Must extract verified entry point from HotSpotNmethod after VM to Java // transition in JavaCallWrapper constructor so that it is safe with // respect to nmethod sweeping. 
- address verified_entry_point = (address) HotSpotJVMCI::InstalledCode::entryPoint(NULL, alternative_target()); - if (verified_entry_point != NULL) { + address verified_entry_point = (address) HotSpotJVMCI::InstalledCode::entryPoint(nullptr, alternative_target()); + if (verified_entry_point != nullptr) { thread->set_jvmci_alternate_call_target(verified_entry_point); entry_point = method->adapter()->get_i2c_entry(); } @@ -439,7 +439,7 @@ void JavaCalls::call_helper(JavaValue* result, const methodHandle& method, JavaC // Restore possible oop return if (oop_result_flag) { result->set_oop(thread->vm_result()); - thread->set_vm_result(NULL); + thread->set_vm_result(nullptr); } } @@ -471,7 +471,7 @@ inline oop resolve_indirect_oop(intptr_t value, uint state) { default: ShouldNotReachHere(); - return NULL; + return nullptr; } } diff --git a/src/hotspot/share/runtime/javaCalls.hpp b/src/hotspot/share/runtime/javaCalls.hpp index ba7a76fe89f8f..b0b861c4e7add 100644 --- a/src/hotspot/share/runtime/javaCalls.hpp +++ b/src/hotspot/share/runtime/javaCalls.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -66,7 +66,7 @@ class JavaCallWrapper: StackObj { Method* callee_method() { return _callee_method; } void oops_do(OopClosure* f); - bool is_first_frame() const { return _anchor.last_Java_sp() == NULL; } + bool is_first_frame() const { return _anchor.last_Java_sp() == nullptr; } }; diff --git a/src/hotspot/share/runtime/javaFrameAnchor.hpp b/src/hotspot/share/runtime/javaFrameAnchor.hpp index 383851ae9f5d7..a0636bc3d60cb 100644 --- a/src/hotspot/share/runtime/javaFrameAnchor.hpp +++ b/src/hotspot/share/runtime/javaFrameAnchor.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -57,7 +57,7 @@ friend class UpcallLinker; private: // - // Whenever _last_Java_sp != NULL other anchor fields MUST be valid! + // Whenever _last_Java_sp != nullptr other anchor fields MUST be valid! // The stack may not be walkable [check with walkable() ] but the values must be valid. // The profiler apparently depends on this. // @@ -72,14 +72,14 @@ friend class UpcallLinker; volatile address _last_Java_pc; // tells whether the last Java frame is set - // It is important that when last_Java_sp != NULL that the rest of the frame + // It is important that when last_Java_sp != nullptr that the rest of the frame // anchor (including platform specific) all be valid. - bool has_last_Java_frame() const { return _last_Java_sp != NULL; } - // This is very dangerous unless sp == NULL + bool has_last_Java_frame() const { return _last_Java_sp != nullptr; } + // This is very dangerous unless sp == nullptr // Invalidate the anchor so that has_last_frame is false // and no one should look at the other fields. 
- void zap(void) { _last_Java_sp = NULL; } + void zap(void) { _last_Java_sp = nullptr; } #include CPU_HEADER(javaFrameAnchor) diff --git a/src/hotspot/share/runtime/javaThread.cpp b/src/hotspot/share/runtime/javaThread.cpp index 701b538ea7b1e..aca60beba0640 100644 --- a/src/hotspot/share/runtime/javaThread.cpp +++ b/src/hotspot/share/runtime/javaThread.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -146,19 +146,19 @@ void JavaThread::smr_delete() { } // Initialized by VMThread at vm_global_init -OopStorage* JavaThread::_thread_oop_storage = NULL; +OopStorage* JavaThread::_thread_oop_storage = nullptr; OopStorage* JavaThread::thread_oop_storage() { - assert(_thread_oop_storage != NULL, "not yet initialized"); + assert(_thread_oop_storage != nullptr, "not yet initialized"); return _thread_oop_storage; } void JavaThread::set_threadOopHandles(oop p) { - assert(_thread_oop_storage != NULL, "not yet initialized"); + assert(_thread_oop_storage != nullptr, "not yet initialized"); _threadObj = OopHandle(_thread_oop_storage, p); _vthread = OopHandle(_thread_oop_storage, p); - _jvmti_vthread = OopHandle(_thread_oop_storage, NULL); - _scopedValueCache = OopHandle(_thread_oop_storage, NULL); + _jvmti_vthread = OopHandle(_thread_oop_storage, nullptr); + _scopedValueCache = OopHandle(_thread_oop_storage, nullptr); } oop JavaThread::threadObj() const { @@ -174,7 +174,7 @@ oop JavaThread::vthread() const { } void JavaThread::set_vthread(oop p) { - assert(_thread_oop_storage != NULL, "not yet initialized"); + assert(_thread_oop_storage != nullptr, "not yet initialized"); _vthread.replace(p); } @@ -183,7 +183,7 @@ oop JavaThread::jvmti_vthread() const { } void JavaThread::set_jvmti_vthread(oop p) { - assert(_thread_oop_storage != NULL, "not yet initialized"); + assert(_thread_oop_storage != nullptr, "not yet initialized"); _jvmti_vthread.replace(p); } @@ -192,19 +192,19 @@ oop JavaThread::scopedValueCache() const { } void JavaThread::set_scopedValueCache(oop p) { - if (_scopedValueCache.ptr_raw() != NULL) { // i.e. if the OopHandle has been allocated + if (_scopedValueCache.ptr_raw() != nullptr) { // i.e. if the OopHandle has been allocated _scopedValueCache.replace(p); } else { - assert(p == NULL, "not yet initialized"); + assert(p == nullptr, "not yet initialized"); } } void JavaThread::clear_scopedValueBindings() { - set_scopedValueCache(NULL); + set_scopedValueCache(nullptr); oop vthread_oop = vthread(); // vthread may be null here if we get a VM error during startup, // before the java.lang.Thread instance has been created. 
- if (vthread_oop != NULL) { + if (vthread_oop != nullptr) { java_lang_Thread::clear_scopedValueBindings(vthread_oop); } } @@ -212,7 +212,7 @@ void JavaThread::clear_scopedValueBindings() { void JavaThread::allocate_threadObj(Handle thread_group, const char* thread_name, bool daemon, TRAPS) { assert(thread_group.not_null(), "thread group should be specified"); - assert(threadObj() == NULL, "should only create Java thread object once"); + assert(threadObj() == nullptr, "should only create Java thread object once"); InstanceKlass* ik = vmClasses::Thread_klass(); assert(ik->is_initialized(), "must be"); @@ -225,7 +225,7 @@ void JavaThread::allocate_threadObj(Handle thread_group, const char* thread_name set_threadOopHandles(thread_oop()); JavaValue result(T_VOID); - if (thread_name != NULL) { + if (thread_name != nullptr) { Handle name = java_lang_String::create_from_str(thread_name, CHECK); // Thread gets assigned specified name and null target JavaCalls::call_special(&result, @@ -282,10 +282,10 @@ void JavaThread::collect_counters(jlong* array, int length) { // Attempt to enlarge the array for per thread counters. jlong* resize_counters_array(jlong* old_counters, int current_size, int new_size) { jlong* new_counters = NEW_C_HEAP_ARRAY_RETURN_NULL(jlong, new_size, mtJVMCI); - if (new_counters == NULL) { - return NULL; + if (new_counters == nullptr) { + return nullptr; } - if (old_counters == NULL) { + if (old_counters == nullptr) { old_counters = new_counters; memset(old_counters, 0, sizeof(jlong) * new_size); } else { @@ -303,7 +303,7 @@ jlong* resize_counters_array(jlong* old_counters, int current_size, int new_size // Attempt to enlarge the array for per thread counters. bool JavaThread::resize_counters(int current_size, int new_size) { jlong* new_counters = resize_counters_array(_jvmci_counters, current_size, new_size); - if (new_counters == NULL) { + if (new_counters == nullptr) { return false; } else { _jvmci_counters = new_counters; @@ -323,7 +323,7 @@ class VM_JVMCIResizeCounters : public VM_Operation { void doit() { // Resize the old thread counters array jlong* new_counters = resize_counters_array(JavaThread::_jvmci_old_thread_counters, JVMCICounterSize, _new_size); - if (new_counters == NULL) { + if (new_counters == nullptr) { _failed = true; return; } else { @@ -409,11 +409,11 @@ JavaThread::JavaThread() : _vm_result(nullptr), _vm_result_2(nullptr), - _current_pending_monitor(NULL), + _current_pending_monitor(nullptr), _current_pending_monitor_is_from_java(true), - _current_waiting_monitor(NULL), - _active_handles(NULL), - _free_handle_block(NULL), + _current_waiting_monitor(nullptr), + _active_handles(nullptr), + _free_handle_block(nullptr), _Stalled(0), _monitor_chunks(nullptr), @@ -541,9 +541,9 @@ void JavaThread::interrupt() { bool JavaThread::is_interrupted(bool clear_interrupted) { debug_only(check_for_dangling_thread_pointer(this);) - if (_threadObj.peek() == NULL) { + if (_threadObj.peek() == nullptr) { // If there is no j.l.Thread then it is impossible to have - // been interrupted. We can find NULL during VM initialization + // been interrupted. We can find null during VM initialization // or when a JNI thread is still in the process of attaching. // In such cases this must be the current thread. assert(this == Thread::current(), "invariant"); @@ -598,7 +598,7 @@ JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz) : JavaThread thr_type = entry_point == &CompilerThread::thread_entry ? 
os::compiler_thread : os::java_thread; os::create_thread(this, thr_type, stack_sz); - // The _osthread may be NULL here because we ran out of memory (too many threads active). + // The _osthread may be null here because we ran out of memory (too many threads active). // We need to throw an OutOfMemoryError - however we cannot do this here because the caller // may hold a lock and all locks must be unlocked before throwing the exception (throwing // the exception consists of creating the exception object & initializing it, initialization @@ -617,30 +617,30 @@ JavaThread::~JavaThread() { // Return the sleep event to the free list ParkEvent::Release(_SleepEvent); - _SleepEvent = NULL; + _SleepEvent = nullptr; // Free any remaining previous UnrollBlock vframeArray* old_array = vframe_array_last(); - if (old_array != NULL) { + if (old_array != nullptr) { Deoptimization::UnrollBlock* old_info = old_array->unroll_block(); - old_array->set_unroll_block(NULL); + old_array->set_unroll_block(nullptr); delete old_info; delete old_array; } JvmtiDeferredUpdates* updates = deferred_updates(); - if (updates != NULL) { + if (updates != nullptr) { // This can only happen if thread is destroyed before deoptimization occurs. assert(updates->count() > 0, "Updates holder not deleted"); // free deferred updates. delete updates; - set_deferred_updates(NULL); + set_deferred_updates(nullptr); } // All Java related clean up happens in exit ThreadSafepointState::destroy(this); - if (_thread_stat != NULL) delete _thread_stat; + if (_thread_stat != nullptr) delete _thread_stat; #if INCLUDE_JVMCI if (JVMCICounterSize > 0) { @@ -697,7 +697,7 @@ void JavaThread::run() { void JavaThread::thread_main_inner() { assert(JavaThread::current() == this, "sanity check"); - assert(_threadObj.peek() != NULL, "just checking"); + assert(_threadObj.peek() != nullptr, "just checking"); // Execute thread entry point unless this thread has a pending exception. // Note: Due to JVMTI StopThread we can have pending exceptions already!
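The conversions in these hunks are mechanical, but they are not purely cosmetic: NULL is an integer null pointer constant (typically 0 or 0L), while nullptr has its own type, std::nullptr_t, which only converts to pointer types. A minimal standalone sketch, not HotSpot code, of why that matters for overload resolution:

#include <iostream>

static void dispatch(int)   { std::cout << "int overload\n"; }
static void dispatch(char*) { std::cout << "pointer overload\n"; }

int main() {
  dispatch(0);        // integer literal: int overload
  // dispatch(NULL);  // 0 or 0L underneath: int overload or ambiguous, never reliably a pointer
  dispatch(nullptr);  // std::nullptr_t: unambiguously the pointer overload
  return 0;
}

The same property makes comparisons such as the asserts above type-checked against pointer operands rather than relying on an integer-literal convention.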
@@ -733,14 +733,14 @@ static void ensure_join(JavaThread* thread) { java_lang_Thread::set_thread_status(threadObj(), JavaThreadStatus::TERMINATED); // Clear the native thread instance - this makes isAlive return false and allows the join() // to complete once we've done the notify_all below - java_lang_Thread::set_thread(threadObj(), NULL); + java_lang_Thread::set_thread(threadObj(), nullptr); lock.notify_all(thread); // Ignore pending exception, since we are exiting anyway thread->clear_pending_exception(); } static bool is_daemon(oop threadObj) { - return (threadObj != NULL && java_lang_Thread::is_daemon(threadObj)); + return (threadObj != nullptr && java_lang_Thread::is_daemon(threadObj)); } // For any new cleanup additions, please check to see if they need to be applied to @@ -878,15 +878,15 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) { // is in a consistent state, in case GC happens JFR_ONLY(Jfr::on_thread_exit(this);) - if (active_handles() != NULL) { + if (active_handles() != nullptr) { JNIHandleBlock* block = active_handles(); - set_active_handles(NULL); + set_active_handles(nullptr); JNIHandleBlock::release_block(block); } - if (free_handle_block() != NULL) { + if (free_handle_block() != nullptr) { JNIHandleBlock* block = free_handle_block(); - set_free_handle_block(NULL); + set_free_handle_block(nullptr); JNIHandleBlock::release_block(block); } @@ -903,7 +903,7 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) { // We need to cache the thread name for logging purposes below as once // we have called on_thread_detach this thread must not access any oops. - char* thread_name = NULL; + char* thread_name = nullptr; if (log_is_enabled(Debug, os, thread, timer)) { ResourceMark rm(this); thread_name = os::strdup(name()); @@ -949,15 +949,15 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) { } void JavaThread::cleanup_failed_attach_current_thread(bool is_daemon) { - if (active_handles() != NULL) { + if (active_handles() != nullptr) { JNIHandleBlock* block = active_handles(); - set_active_handles(NULL); + set_active_handles(nullptr); JNIHandleBlock::release_block(block); } - if (free_handle_block() != NULL) { + if (free_handle_block() != nullptr) { JNIHandleBlock* block = free_handle_block(); - set_free_handle_block(NULL); + set_free_handle_block(nullptr); JNIHandleBlock::release_block(block); } @@ -979,7 +979,7 @@ JavaThread* JavaThread::active() { } else { assert(thread->is_VM_thread(), "this must be a vm thread"); VM_Operation* op = ((VMThread*) thread)->vm_operation(); - JavaThread *ret = op == NULL ? NULL : JavaThread::cast(op->calling_thread()); + JavaThread *ret = op == nullptr ? 
nullptr : JavaThread::cast(op->calling_thread()); return ret; } } @@ -987,7 +987,7 @@ JavaThread* JavaThread::active() { bool JavaThread::is_lock_owned(address adr) const { if (Thread::is_lock_owned(adr)) return true; - for (MonitorChunk* chunk = monitor_chunks(); chunk != NULL; chunk = chunk->next()) { + for (MonitorChunk* chunk = monitor_chunks(); chunk != nullptr; chunk = chunk->next()) { if (chunk->contains(adr)) return true; } @@ -1008,7 +1008,7 @@ void JavaThread::add_monitor_chunk(MonitorChunk* chunk) { } void JavaThread::remove_monitor_chunk(MonitorChunk* chunk) { - guarantee(monitor_chunks() != NULL, "must be non empty"); + guarantee(monitor_chunks() != nullptr, "must be non empty"); if (monitor_chunks() == chunk) { set_monitor_chunks(chunk->next()); } else { @@ -1030,7 +1030,7 @@ void JavaThread::handle_special_runtime_exit_condition() { // Asynchronous exceptions support // void JavaThread::handle_async_exception(oop java_throwable) { - assert(java_throwable != NULL, "should have an _async_exception to throw"); + assert(java_throwable != nullptr, "should have an _async_exception to throw"); assert(!is_at_poll_safepoint(), "should have never called this method"); if (has_last_Java_frame()) { @@ -1242,7 +1242,7 @@ void JavaThread::deoptimize() { jio_snprintf(buffer, sizeof(buffer), "%d", sd->bci()); size_t len = strlen(buffer); const char * found = strstr(DeoptimizeOnlyAt, buffer); - while (found != NULL) { + while (found != nullptr) { if ((found[len] == ',' || found[len] == '\n' || found[len] == '\0') && (found == DeoptimizeOnlyAt || found[-1] == ',' || found[-1] == '\n')) { // Check that the bci found is bracketed by terminators. @@ -1310,7 +1310,7 @@ void JavaThread::push_jni_handle_block() { // Inlined code from jni_PushLocalFrame() JNIHandleBlock* old_handles = active_handles(); JNIHandleBlock* new_handles = JNIHandleBlock::allocate_block(this); - assert(old_handles != NULL && new_handles != NULL, "should not be NULL"); + assert(old_handles != nullptr && new_handles != nullptr, "should not be null"); new_handles->set_pop_frame_link(old_handles); // make sure java handles get gc'd. 
set_active_handles(new_handles); } @@ -1322,7 +1322,7 @@ void JavaThread::pop_jni_handle_block() { JNIHandleBlock* new_handles = old_handles->pop_frame_link(); assert(new_handles != nullptr, "should never set active handles to null"); set_active_handles(new_handles); - old_handles->set_pop_frame_link(NULL); + old_handles->set_pop_frame_link(nullptr); JNIHandleBlock::release_block(old_handles, this); } @@ -1333,7 +1333,7 @@ void JavaThread::oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf) { // Traverse the GCHandles Thread::oops_do_no_frames(f, cf); - if (active_handles() != NULL) { + if (active_handles() != nullptr) { active_handles()->oops_do(f); } @@ -1341,16 +1341,16 @@ void JavaThread::oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf) { if (has_last_Java_frame()) { // Traverse the monitor chunks - for (MonitorChunk* chunk = monitor_chunks(); chunk != NULL; chunk = chunk->next()) { + for (MonitorChunk* chunk = monitor_chunks(); chunk != nullptr; chunk = chunk->next()) { chunk->oops_do(f); } } - assert(vframe_array_head() == NULL, "deopt in progress at a safepoint!"); + assert(vframe_array_head() == nullptr, "deopt in progress at a safepoint!"); // If we have deferred set_locals there might be oops waiting to be // written GrowableArray<jvmtiDeferredLocalVariableSet*>* list = JvmtiDeferredUpdates::deferred_locals(this); - if (list != NULL) { + if (list != nullptr) { for (int i = 0; i < list->length(); i++) { list->at(i)->oops_do(f); } @@ -1364,7 +1364,7 @@ void JavaThread::oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf) { f->do_oop((oop*) &_jvmci_reserved_oop0); #endif - if (jvmti_thread_state() != NULL) { + if (jvmti_thread_state() != nullptr) { jvmti_thread_state()->oops_do(f, cf); } @@ -1385,7 +1385,7 @@ void JavaThread::oops_do_frames(OopClosure* f, CodeBlobClosure* cf) { return; } // Finish any pending lazy GC activity for the frames - StackWatermarkSet::finish_processing(this, NULL /* context */, StackWatermarkKind::gc); + StackWatermarkSet::finish_processing(this, nullptr /* context */, StackWatermarkKind::gc); // Traverse the execution stack for (StackFrameStream fst(this, true /* update */, false /* process_frames */); !fst.is_done(); fst.next()) { fst.current()->oops_do(f, cf, fst.register_map()); @@ -1410,7 +1410,7 @@ void JavaThread::nmethods_do(CodeBlobClosure* cf) { } } - if (jvmti_thread_state() != NULL) { + if (jvmti_thread_state() != nullptr) { jvmti_thread_state()->nmethods_do(cf); } } @@ -1424,11 +1424,11 @@ void JavaThread::metadata_do(MetadataClosure* f) { } else if (is_Compiler_thread()) { // need to walk ciMetadata in current compile tasks to keep alive. CompilerThread* ct = (CompilerThread*)this;
CompilerThread* ct = (CompilerThread*)this; - if (ct->env() != NULL) { + if (ct->env() != nullptr) { ct->env()->metadata_do(f); } CompileTask* task = ct->task(); - if (task != NULL) { + if (task != nullptr) { task->metadata_do(f); } } @@ -1462,7 +1462,7 @@ void JavaThread::print_on(outputStream *st, bool print_extended_info) const { st->print_raw(name()); st->print_raw("\" "); oop thread_oop = threadObj(); - if (thread_oop != NULL) { + if (thread_oop != nullptr) { st->print("#" INT64_FORMAT " [%ld] ", (int64_t)java_lang_Thread::thread_id(thread_oop), (long) osthread()->thread_id()); if (java_lang_Thread::is_daemon(thread_oop)) st->print("daemon "); st->print("prio=%d ", java_lang_Thread::priority(thread_oop)); @@ -1470,10 +1470,10 @@ void JavaThread::print_on(outputStream *st, bool print_extended_info) const { Thread::print_on(st, print_extended_info); // print guess for valid stack memory region (assume 4K pages); helps lock debugging st->print_cr("[" INTPTR_FORMAT "]", (intptr_t)last_Java_sp() & ~right_n_bits(12)); - if (thread_oop != NULL) { + if (thread_oop != nullptr) { if (is_vthread_mounted()) { oop vt = vthread(); - assert(vt != NULL, ""); + assert(vt != nullptr, ""); st->print_cr(" Carrying virtual thread #" INT64_FORMAT, (int64_t)java_lang_Thread::thread_id(vt)); } else { st->print_cr(" java.lang.Thread.State: %s", java_lang_Thread::thread_status_name(thread_oop)); @@ -1484,9 +1484,9 @@ void JavaThread::print_on(outputStream *st, bool print_extended_info) const { #endif // PRODUCT if (is_Compiler_thread()) { CompileTask *task = ((CompilerThread*)this)->task(); - if (task != NULL) { + if (task != nullptr) { st->print(" Compiling: "); - task->print(st, NULL, true, false); + task->print(st, nullptr, true, false); } else { st->print(" No compile task"); } @@ -1544,19 +1544,19 @@ static void frame_verify(frame* f, const RegisterMap *map) { f->verify(map); } void JavaThread::verify() { // Verify oops in the thread. - oops_do(&VerifyOopClosure::verify_oop, NULL); + oops_do(&VerifyOopClosure::verify_oop, nullptr); // Verify the stack frames. frames_do(frame_verify); } // CR 6300358 (sub-CR 2137150) -// Most callers of this method assume that it can't return NULL but a +// Most callers of this method assume that it can't return null but a // thread may not have a name whilst it is in the process of attaching to // the VM - see CR 6412693, and there are places where a JavaThread can be // seen prior to having its threadObj set (e.g., JNI attaching threads and // if vm exit occurs during initialization). These cases can all be accounted -// for such that this method never returns NULL. +// for such that this method never returns null. const char* JavaThread::name() const { if (Thread::is_JavaThread_protected(/* target */ this)) { // The target JavaThread is protected so get_thread_name_string() is safe: @@ -1567,7 +1567,7 @@ const char* JavaThread::name() const { return Thread::name(); } -// Returns a non-NULL representation of this thread's name, or a suitable +// Returns a non-null representation of this thread's name, or a suitable // descriptive string if there is no set name. const char* JavaThread::get_thread_name_string(char* buf, int buflen) const { const char* name_str; @@ -1579,10 +1579,10 @@ const char* JavaThread::get_thread_name_string(char* buf, int buflen) const { // or if it is a JavaThread that can safely access oops. 
#endif oop thread_obj = threadObj(); - if (thread_obj != NULL) { + if (thread_obj != nullptr) { oop name = java_lang_Thread::name(thread_obj); - if (name != NULL) { - if (buf == NULL) { + if (name != nullptr) { + if (buf == nullptr) { name_str = java_lang_String::as_utf8_string(name); } else { name_str = java_lang_String::as_utf8_string(name, buf, buflen); @@ -1608,16 +1608,16 @@ const char* JavaThread::get_thread_name_string(char* buf, int buflen) const { } } #endif - assert(name_str != NULL, "unexpected NULL thread name"); + assert(name_str != nullptr, "unexpected null thread name"); return name_str; } // Helper to extract the name from the thread oop for logging. const char* JavaThread::name_for(oop thread_obj) { - assert(thread_obj != NULL, "precondition"); + assert(thread_obj != nullptr, "precondition"); oop name = java_lang_Thread::name(thread_obj); const char* name_str; - if (name != NULL) { + if (name != nullptr) { name_str = java_lang_String::as_utf8_string(name); } else { name_str = "<un-named>"; @@ -1668,10 +1668,10 @@ void JavaThread::prepare(jobject jni_thread, ThreadPriority prio) { oop JavaThread::current_park_blocker() { // Support for JSR-166 locks oop thread_oop = threadObj(); - if (thread_oop != NULL) { + if (thread_oop != nullptr) { return java_lang_Thread::park_blocker(thread_oop); } - return NULL; + return nullptr; } // Print current stack trace for checked JNI warnings and JNI fatal errors. @@ -1707,7 +1707,7 @@ void JavaThread::print_stack_on(outputStream* st) { RegisterMap::WalkContinuation::skip); vframe* start_vf = platform_thread_last_java_vframe(&reg_map); int count = 0; - for (vframe* f = start_vf; f != NULL; f = f->sender()) { + for (vframe* f = start_vf; f != nullptr; f = f->sender()) { if (f->is_java_frame()) { javaVFrame* jvf = javaVFrame::cast(f); java_lang_Throwable::print_stack_element(st, jvf->method(), jvf->bci()); @@ -1741,7 +1741,7 @@ void JavaThread::print_vthread_stack_on(outputStream* st) { ContinuationEntry* cont_entry = last_continuation(); vframe* start_vf = last_java_vframe(&reg_map); int count = 0; - for (vframe* f = start_vf; f != NULL; f = f->sender()) { + for (vframe* f = start_vf; f != nullptr; f = f->sender()) { // Watch for end of vthread stack if (Continuation::is_continuation_enterSpecial(f->fr())) { assert(cont_entry == Continuation::get_continuation_entry_for_entry_frame(this, f->fr()), ""); @@ -1793,7 +1793,7 @@ JvmtiThreadState* JavaThread::rebind_to_jvmti_thread_state_of(oop thread_oop) { // JVMTI PopFrame support void JavaThread::popframe_preserve_args(ByteSize size_in_bytes, void* start) { - assert(_popframe_preserved_args == NULL, "should not wipe out old PopFrame preserved arguments"); + assert(_popframe_preserved_args == nullptr, "should not wipe out old PopFrame preserved arguments"); if (in_bytes(size_in_bytes) != 0) { _popframe_preserved_args = NEW_C_HEAP_ARRAY(char, in_bytes(size_in_bytes), mtThread); _popframe_preserved_args_size = in_bytes(size_in_bytes); @@ -1816,9 +1816,9 @@ WordSize JavaThread::popframe_preserved_args_size_in_words() { } void JavaThread::popframe_free_preserved_args() { - assert(_popframe_preserved_args != NULL, "should not free PopFrame preserved arguments twice"); + assert(_popframe_preserved_args != nullptr, "should not free PopFrame preserved arguments twice"); FREE_C_HEAP_ARRAY(char, (char*)_popframe_preserved_args); - _popframe_preserved_args = NULL; + _popframe_preserved_args = nullptr; _popframe_preserved_args_size = 0; } @@ -1838,7 +1838,7 @@ class PrintAndVerifyOopClosure: public OopClosure { protected:
template <typename T> inline void do_oop_work(T* p) { oop obj = RawAccess<>::oop_load(p); - if (obj == NULL) return; + if (obj == nullptr) return; tty->print(INTPTR_FORMAT ": ", p2i(p)); if (oopDesc::is_oop_or_null(obj)) { if (obj->is_objArray()) { @@ -1938,7 +1938,7 @@ frame JavaThread::vthread_last_frame() { frame JavaThread::carrier_last_frame(RegisterMap* reg_map) { const ContinuationEntry* entry = vthread_continuation(); - guarantee (entry != NULL, "Not a carrier thread"); + guarantee (entry != nullptr, "Not a carrier thread"); frame f = entry->to_frame(); if (reg_map->process_frames()) { entry->flush_stack_processing(this); @@ -1952,11 +1952,11 @@ frame JavaThread::platform_thread_last_frame(RegisterMap* reg_map) { } javaVFrame* JavaThread::last_java_vframe(const frame f, RegisterMap *reg_map) { - assert(reg_map != NULL, "a map must be given"); + assert(reg_map != nullptr, "a map must be given"); for (vframe* vf = vframe::new_vframe(&f, reg_map, this); vf; vf = vf->sender()) { if (vf->is_java_frame()) return javaVFrame::cast(vf); } - return NULL; + return nullptr; } Klass* JavaThread::security_get_caller_class(int depth) { @@ -1968,7 +1968,7 @@ Klass* JavaThread::security_get_caller_class(int depth) { if (!vfst.at_end()) { return vfst.method()->method_holder(); } - return NULL; + return nullptr; } // java.lang.Thread.sleep support @@ -2043,7 +2043,7 @@ void JavaThread::invoke_shutdown_hooks() { Klass* shutdown_klass = SystemDictionary::resolve_or_null(vmSymbols::java_lang_Shutdown(), THREAD); - if (shutdown_klass != NULL) { + if (shutdown_klass != nullptr) { // SystemDictionary::resolve_or_null will return null if there was // an exception. If we cannot load the Shutdown class, just don't // call Shutdown.shutdown() at all. This will mean the shutdown hooks @@ -2092,7 +2092,7 @@ Handle JavaThread::create_system_thread_object(const char* name, void JavaThread::start_internal_daemon(JavaThread* current, JavaThread* target, Handle thread_oop, ThreadPriority prio) { - assert(target->osthread() != NULL, "target thread is not properly initialized"); + assert(target->osthread() != nullptr, "target thread is not properly initialized"); MutexLocker mu(current, Threads_lock); diff --git a/src/hotspot/share/runtime/javaThread.hpp b/src/hotspot/share/runtime/javaThread.hpp index ff1d04164f0ca..b26b5027a5ac4 100644 --- a/src/hotspot/share/runtime/javaThread.hpp +++ b/src/hotspot/share/runtime/javaThread.hpp @@ -131,7 +131,7 @@ class JavaThread: public Thread { // adapter to store the callee Method*. This value is NEVER live // across a gc point so it does NOT have to be gc'd // The handshake is open ended since we can't be certain that it will - // be NULLed. This is because we rarely ever see the race and end up + // be nulled. This is because we rarely ever see the race and end up // in handle_wrong_method which is the backend of the handshake. See // code in i2c adapters and handle_wrong_method. @@ -163,7 +163,7 @@ class JavaThread: public Thread { ObjectMonitor* current_pending_monitor() { // Use Atomic::load() to prevent data race between concurrent modification and // concurrent readers, e.g. ThreadService::get_current_contended_monitor(). - // Especially, reloading pointer from thread after NULL check must be prevented. + // Especially, reloading pointer from thread after null check must be prevented.
return Atomic::load(&_current_pending_monitor); } void set_current_pending_monitor(ObjectMonitor* monitor) { @@ -224,7 +224,7 @@ class JavaThread: public Thread { friend class AsyncExceptionHandshake; friend class HandshakeState; - void install_async_exception(AsyncExceptionHandshake* aec = NULL); + void install_async_exception(AsyncExceptionHandshake* aec = nullptr); void handle_async_exception(oop java_throwable); public: bool has_async_exception_condition(); @@ -514,7 +514,7 @@ class JavaThread: public Thread { return on_thread_list() && !is_terminated(); } - // Thread oop. threadObj() can be NULL for initial JavaThread + // Thread oop. threadObj() can be null for initial JavaThread // (or for threads attached via JNI) oop threadObj() const; void set_threadOopHandles(oop p); @@ -536,7 +536,7 @@ class JavaThread: public Thread { ThreadFunction entry_point() const { return _entry_point; } - // Allocates a new Java level thread object for this thread. thread_name may be NULL. + // Allocates a new Java level thread object for this thread. thread_name may be null. void allocate_threadObj(Handle thread_group, const char* thread_name, bool daemon, TRAPS); // Last frame anchor routines @@ -595,7 +595,7 @@ class JavaThread: public Thread { void push_cont_fastpath(intptr_t* sp) { if (sp > _cont_fastpath) _cont_fastpath = sp; } void set_cont_fastpath_thread_state(bool x) { _cont_fastpath_thread_state = (int)x; } intptr_t* raw_cont_fastpath() const { return _cont_fastpath; } - bool cont_fastpath() const { return _cont_fastpath == NULL && _cont_fastpath_thread_state != 0; } + bool cont_fastpath() const { return _cont_fastpath == nullptr && _cont_fastpath_thread_state != 0; } bool cont_fastpath_thread_state() const { return _cont_fastpath_thread_state != 0; } void inc_held_monitor_count(int i = 1, bool jni = false); @@ -706,8 +706,8 @@ class JavaThread: public Thread { void set_pending_deoptimization(int reason) { _pending_deoptimization = reason; } void set_pending_failed_speculation(jlong failed_speculation) { _pending_failed_speculation = failed_speculation; } void set_pending_transfer_to_interpreter(bool b) { _pending_transfer_to_interpreter = b; } - void set_jvmci_alternate_call_target(address a) { assert(_jvmci._alternate_call_target == NULL, "must be"); _jvmci._alternate_call_target = a; } - void set_jvmci_implicit_exception_pc(address a) { assert(_jvmci._implicit_exception_pc == NULL, "must be"); _jvmci._implicit_exception_pc = a; } + void set_jvmci_alternate_call_target(address a) { assert(_jvmci._alternate_call_target == nullptr, "must be"); _jvmci._alternate_call_target = a; } + void set_jvmci_implicit_exception_pc(address a) { assert(_jvmci._implicit_exception_pc == nullptr, "must be"); _jvmci._implicit_exception_pc = a; } virtual bool in_retryable_allocation() const { return _in_retryable_allocation; } void set_in_retryable_allocation(bool b) { _in_retryable_allocation = b; } @@ -729,8 +729,8 @@ class JavaThread: public Thread { void set_is_method_handle_return(bool value) { _is_method_handle_return = value ? 1 : 0; } void clear_exception_oop_and_pc() { - set_exception_oop(NULL); - set_exception_pc(NULL); + set_exception_oop(nullptr); + set_exception_pc(nullptr); } // Check if address is in the usable part of the stack (excludes protected @@ -847,8 +847,8 @@ class JavaThread: public Thread { // pending check, this is done for Native->Java transitions (i.e. user JNI code). 
// VM->Java transitions are not cleared, it is expected that JNI code enclosed // within ThreadToNativeFromVM makes proper exception checks (i.e. VM internal). - bool is_pending_jni_exception_check() const { return _pending_jni_exception_check_fn != NULL; } - void clear_pending_jni_exception_check() { _pending_jni_exception_check_fn = NULL; } + bool is_pending_jni_exception_check() const { return _pending_jni_exception_check_fn != nullptr; } + void clear_pending_jni_exception_check() { _pending_jni_exception_check_fn = nullptr; } const char* get_pending_jni_exception_check() const { return _pending_jni_exception_check_fn; } void set_pending_jni_exception_check(const char* fn_name) { _pending_jni_exception_check_fn = (char*) fn_name; } @@ -876,7 +876,7 @@ class JavaThread: public Thread { void set_entry_point(ThreadFunction entry_point) { _entry_point = entry_point; } // factor out low-level mechanics for use in both normal and error cases - const char* get_thread_name_string(char* buf = NULL, int buflen = 0) const; + const char* get_thread_name_string(char* buf = nullptr, int buflen = 0) const; public: @@ -969,7 +969,7 @@ class JavaThread: public Thread { return JavaThread::cast(Thread::current()); } - // Returns the current thread as a JavaThread, or NULL if not attached + // Returns the current thread as a JavaThread, or nullptr if not attached static inline JavaThread* current_or_null(); // Casts @@ -1000,10 +1000,10 @@ class JavaThread: public Thread { void set_jvmti_thread_state(JvmtiThreadState *value) { _jvmti_thread_state = value; } // A JvmtiThreadState is lazily allocated. This jvmti_thread_state() // getter is used to get this JavaThread's JvmtiThreadState if it has - // one which means NULL can be returned. JvmtiThreadState::state_for() + // one which means null can be returned. JvmtiThreadState::state_for() // is used to get the specified JavaThread's JvmtiThreadState if it has // one or it allocates a new JvmtiThreadState for the JavaThread and - // returns it. JvmtiThreadState::state_for() will return NULL only if + // returns it. JvmtiThreadState::state_for() will return null only if // the specified JavaThread is exiting. JvmtiThreadState *jvmti_thread_state() const { return _jvmti_thread_state; } static ByteSize jvmti_thread_state_offset() { return byte_offset_of(JavaThread, _jvmti_thread_state); } diff --git a/src/hotspot/share/runtime/javaThread.inline.hpp b/src/hotspot/share/runtime/javaThread.inline.hpp index 1b472a7abf789..4e4399d77a4a0 100644 --- a/src/hotspot/share/runtime/javaThread.inline.hpp +++ b/src/hotspot/share/runtime/javaThread.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -148,7 +148,7 @@ inline JavaThreadState JavaThread::thread_state() const { } inline void JavaThread::set_thread_state(JavaThreadState s) { - assert(current_or_null() == NULL || current_or_null() == this, + assert(current_or_null() == nullptr || current_or_null() == this, "state change should only be called by the current thread"); #if defined(PPC64) || defined (AARCH64) || defined(RISCV64) // Use membars when accessing volatile _thread_state. 
See @@ -225,8 +225,8 @@ inline void JavaThread::set_terminated(TerminatedTypes t) { // Allow tracking of class initialization monitor use inline void JavaThread::set_class_to_be_initialized(InstanceKlass* k) { - assert((k == NULL && _class_to_be_initialized != NULL) || - (k != NULL && _class_to_be_initialized == NULL), "incorrect usage"); + assert((k == nullptr && _class_to_be_initialized != nullptr) || + (k != nullptr && _class_to_be_initialized == nullptr), "incorrect usage"); assert(this == Thread::current(), "Only the current thread can set this field"); _class_to_be_initialized = k; } diff --git a/src/hotspot/share/runtime/jniHandles.cpp b/src/hotspot/share/runtime/jniHandles.cpp index 4f89f0beab9b8..29c57bb1ac909 100644 --- a/src/hotspot/share/runtime/jniHandles.cpp +++ b/src/hotspot/share/runtime/jniHandles.cpp @@ -47,8 +47,8 @@ OopStorage* JNIHandles::weak_global_handles() { } // Serviceability agent support. -OopStorage* JNIHandles::_global_handles = NULL; -OopStorage* JNIHandles::_weak_global_handles = NULL; +OopStorage* JNIHandles::_global_handles = nullptr; +OopStorage* JNIHandles::_weak_global_handles = nullptr; void jni_handles_init() { JNIHandles::_global_handles = OopStorageSet::create_strong("JNI Global", mtInternal); @@ -59,10 +59,10 @@ jobject JNIHandles::make_local(oop obj) { return make_local(JavaThread::current(), obj); } -// Used by NewLocalRef which requires NULL on out-of-memory +// Used by NewLocalRef which requires null on out-of-memory jobject JNIHandles::make_local(JavaThread* thread, oop obj, AllocFailType alloc_failmode) { - if (obj == NULL) { - return NULL; // ignore null handles + if (obj == nullptr) { + return nullptr; // ignore null handles } else { assert(oopDesc::is_oop(obj), "not an oop"); assert(!current_thread_in_native(), "must not be in native"); @@ -85,14 +85,14 @@ static void report_handle_allocation_failure(AllocFailType alloc_failmode, jobject JNIHandles::make_global(Handle obj, AllocFailType alloc_failmode) { assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC"); assert(!current_thread_in_native(), "must not be in native"); - jobject res = NULL; + jobject res = nullptr; if (!obj.is_null()) { // ignore null handles assert(oopDesc::is_oop(obj()), "not an oop"); oop* ptr = global_handles()->allocate(); - // Return NULL on allocation failure. - if (ptr != NULL) { - assert(NativeAccess<AS_NO_KEEPALIVE>::oop_load(ptr) == oop(NULL), "invariant"); + // Return null on allocation failure. + if (ptr != nullptr) { + assert(NativeAccess<AS_NO_KEEPALIVE>::oop_load(ptr) == oop(nullptr), "invariant"); NativeAccess<>::oop_store(ptr, obj()); char* tptr = reinterpret_cast<char*>(ptr) + TypeTag::global; res = reinterpret_cast<jobject>(tptr); @@ -107,14 +107,14 @@ jobject JNIHandles::make_global(Handle obj, AllocFailType alloc_failmode) { jweak JNIHandles::make_weak_global(Handle obj, AllocFailType alloc_failmode) { assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC"); assert(!current_thread_in_native(), "must not be in native"); - jweak res = NULL; + jweak res = nullptr; if (!obj.is_null()) { // ignore null handles assert(oopDesc::is_oop(obj()), "not an oop"); oop* ptr = weak_global_handles()->allocate(); - // Return NULL on allocation failure. - if (ptr != NULL) { - assert(NativeAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(ptr) == oop(NULL), "invariant"); + // Return nullptr on allocation failure.
+ if (ptr != nullptr) { + assert(NativeAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(ptr) == oop(nullptr), "invariant"); NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(ptr, obj()); char* tptr = reinterpret_cast<char*>(ptr) + TypeTag::weak_global; res = reinterpret_cast<jweak>(tptr); @@ -125,38 +125,38 @@ jweak JNIHandles::make_weak_global(Handle obj, AllocFailType alloc_failmode) { return res; } -// Resolve some erroneous cases to NULL, rather than treating them as +// Resolve some erroneous cases to null, rather than treating them as // possibly unchecked errors. In particular, deleted handles are -// treated as NULL (though a deleted and later reallocated handle +// treated as null (though a deleted and later reallocated handle // isn't detected). oop JNIHandles::resolve_external_guard(jobject handle) { - oop result = NULL; - if (handle != NULL) { + oop result = nullptr; + if (handle != nullptr) { result = resolve_impl<DECORATORS_NONE, true>(handle); } return result; } bool JNIHandles::is_weak_global_cleared(jweak handle) { - assert(handle != NULL, "precondition"); + assert(handle != nullptr, "precondition"); oop* oop_ptr = weak_global_ptr(handle); oop value = NativeAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(oop_ptr); - return value == NULL; + return value == nullptr; } void JNIHandles::destroy_global(jobject handle) { - if (handle != NULL) { + if (handle != nullptr) { oop* oop_ptr = global_ptr(handle); - NativeAccess<>::oop_store(oop_ptr, (oop)NULL); + NativeAccess<>::oop_store(oop_ptr, (oop)nullptr); global_handles()->release(oop_ptr); } } void JNIHandles::destroy_weak_global(jweak handle) { - if (handle != NULL) { + if (handle != nullptr) { oop* oop_ptr = weak_global_ptr(handle); - NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(oop_ptr, (oop)NULL); + NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(oop_ptr, (oop)nullptr); weak_global_handles()->release(oop_ptr); } } @@ -181,7 +181,7 @@ inline bool is_storage_handle(const OopStorage* storage, const oop* ptr) { jobjectRefType JNIHandles::handle_type(JavaThread* thread, jobject handle) { - assert(handle != NULL, "precondition"); + assert(handle != nullptr, "precondition"); jobjectRefType result = JNIInvalidRefType; if (is_weak_global_tagged(handle)) { if (is_storage_handle(weak_global_handles(), weak_global_ptr(handle))) { @@ -212,11 +212,11 @@ jobjectRefType JNIHandles::handle_type(JavaThread* thread, jobject handle) { bool JNIHandles::is_local_handle(JavaThread* thread, jobject handle) { - assert(handle != NULL, "precondition"); + assert(handle != nullptr, "precondition"); JNIHandleBlock* block = thread->active_handles(); // Look back past possible native calls to jni_PushLocalFrame. - while (block != NULL) { + while (block != nullptr) { if (block->chain_contains(handle)) { return true; } @@ -231,7 +231,7 @@ bool JNIHandles::is_local_handle(JavaThread* thread, jobject handle) { // come from, so we'll check the whole stack. bool JNIHandles::is_frame_handle(JavaThread* thr, jobject handle) { - assert(handle != NULL, "precondition"); + assert(handle != nullptr, "precondition"); // If there is no java frame, then this must be top level code, such // as the java command executable, in which case, this type of handle // is not permitted.
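make_global and make_weak_global above return handles whose low bits carry a type tag (TypeTag::global, TypeTag::weak_global), which is what the is_*_tagged predicates and the *_ptr accessors later undo. A self-contained sketch of that scheme; the names and tag values here are illustrative assumptions, the real constants live in jniHandles.hpp:

#include <cassert>
#include <cstdint>

const uintptr_t kGlobalTag     = 0b10;  // assumed values; see TypeTag in jniHandles.hpp
const uintptr_t kWeakGlobalTag = 0b11;
const uintptr_t kTagMask       = 0b11;  // low bits are free because storage slots are pointer-aligned

void* tag_global(void* storage_slot) {
  // Add the tag into the low bits of the slot address.
  return reinterpret_cast<char*>(storage_slot) + kGlobalTag;
}

bool is_global_tagged(void* handle) {
  return (reinterpret_cast<uintptr_t>(handle) & kTagMask) == kGlobalTag;
}

void* global_ptr(void* handle) {
  // Strip the tag to recover the underlying slot address.
  assert(is_global_tagged(handle));
  return reinterpret_cast<char*>(handle) - kGlobalTag;
}

int main() {
  alignas(8) static void* slot = nullptr;  // stand-in for an OopStorage slot
  void* handle = tag_global(&slot);
  assert(is_global_tagged(handle));
  assert(global_ptr(handle) == &slot);
  return 0;
}

Tagging lets resolve() classify a handle from its bits alone, without consulting the storage that owns it.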
@@ -241,14 +241,14 @@ bool JNIHandles::is_frame_handle(JavaThread* thr, jobject handle) { bool JNIHandles::is_global_handle(jobject handle) { - assert(handle != NULL, "precondition"); + assert(handle != nullptr, "precondition"); assert(!is_global_tagged(handle) || is_storage_handle(global_handles(), global_ptr(handle)), "invalid storage"); return is_global_tagged(handle); } bool JNIHandles::is_weak_global_handle(jobject handle) { - assert(handle != NULL, "precondition"); + assert(handle != nullptr, "precondition"); assert(!is_weak_global_tagged(handle) || is_storage_handle(weak_global_handles(), weak_global_ptr(handle)), "invalid storage"); return is_weak_global_tagged(handle); } @@ -315,7 +315,7 @@ void JNIHandleBlock::zap() { // Zap block values _top = 0; for (int index = 0; index < block_size_in_oops; index++) { - // NOT using Access here; just bare clobbering to NULL, since the + // NOT using Access here; just bare clobbering to null, since the // block no longer contains valid oops. _handles[index] = 0; } @@ -324,20 +324,20 @@ JNIHandleBlock* JNIHandleBlock::allocate_block(JavaThread* thread, AllocFailType alloc_failmode) { // The VM thread can allocate a handle block on behalf of another thread during a safepoint. - assert(thread == NULL || thread == Thread::current() || SafepointSynchronize::is_at_safepoint(), + assert(thread == nullptr || thread == Thread::current() || SafepointSynchronize::is_at_safepoint(), "sanity check"); JNIHandleBlock* block; // Check the thread-local free list for a block so we don't // have to acquire a mutex. - if (thread != NULL && thread->free_handle_block() != NULL) { + if (thread != nullptr && thread->free_handle_block() != nullptr) { block = thread->free_handle_block(); thread->set_free_handle_block(block->_next); } else { // Allocate new block if (alloc_failmode == AllocFailStrategy::RETURN_NULL) { block = new (std::nothrow) JNIHandleBlock(); - if (block == NULL) { - return NULL; + if (block == nullptr) { + return nullptr; } } else { block = new JNIHandleBlock(); @@ -346,46 +346,46 @@ JNIHandleBlock* JNIHandleBlock::allocate_block(JavaThread* thread, AllocFailType block->zap(); } block->_top = 0; - block->_next = NULL; - block->_pop_frame_link = NULL; + block->_next = nullptr; + block->_pop_frame_link = nullptr; // _last, _free_list & _allocate_before_rebuild initialized in allocate_handle - debug_only(block->_last = NULL); - debug_only(block->_free_list = NULL); + debug_only(block->_last = nullptr); + debug_only(block->_free_list = nullptr); debug_only(block->_allocate_before_rebuild = -1); return block; } void JNIHandleBlock::release_block(JNIHandleBlock* block, JavaThread* thread) { - assert(thread == NULL || thread == Thread::current(), "sanity check"); + assert(thread == nullptr || thread == Thread::current(), "sanity check"); JNIHandleBlock* pop_frame_link = block->pop_frame_link(); // Put returned block at the beginning of the thread-local free list. - // Note that if thread == NULL, we use it as an implicit argument that + // Note that if thread == nullptr, we use it as an implicit argument that // we _don't_ want the block to be kept on the free_handle_block. // See for instance JavaThread::exit().
- if (thread != NULL ) { + if (thread != nullptr ) { block->zap(); JNIHandleBlock* freelist = thread->free_handle_block(); - block->_pop_frame_link = NULL; + block->_pop_frame_link = nullptr; thread->set_free_handle_block(block); // Add original freelist to end of chain - if ( freelist != NULL ) { - while ( block->_next != NULL ) block = block->_next; + if ( freelist != nullptr ) { + while ( block->_next != nullptr ) block = block->_next; block->_next = freelist; } - block = NULL; + block = nullptr; } else { - DEBUG_ONLY(block->set_pop_frame_link(NULL)); - while (block != NULL) { + DEBUG_ONLY(block->set_pop_frame_link(nullptr)); + while (block != nullptr) { JNIHandleBlock* next = block->_next; Atomic::dec(&_blocks_allocated); - assert(block->pop_frame_link() == NULL, "pop_frame_link should be NULL"); + assert(block->pop_frame_link() == nullptr, "pop_frame_link should be nullptr"); delete block; block = next; } } - if (pop_frame_link != NULL) { + if (pop_frame_link != nullptr) { // As a sanity check we release blocks pointed to by the pop_frame_link. // This should never happen (only if PopLocalFrame is not called the // correct number of times). @@ -398,10 +398,10 @@ void JNIHandleBlock::oops_do(OopClosure* f) { JNIHandleBlock* current_chain = this; // Iterate over chain of blocks, followed by chains linked through the // pop frame links. - while (current_chain != NULL) { - for (JNIHandleBlock* current = current_chain; current != NULL; + while (current_chain != nullptr) { + for (JNIHandleBlock* current = current_chain; current != nullptr; current = current->_next) { - assert(current == current_chain || current->pop_frame_link() == NULL, + assert(current == current_chain || current->pop_frame_link() == nullptr, "only blocks first in chain should have pop frame link set"); for (int index = 0; index < current->_top; index++) { uintptr_t* addr = &(current->_handles)[index]; @@ -429,15 +429,15 @@ jobject JNIHandleBlock::allocate_handle(JavaThread* caller, oop obj, AllocFailTy // This is the first allocation or the initial block got zapped when // entering a native function. If we have any following blocks they are // not valid anymore. - for (JNIHandleBlock* current = _next; current != NULL; + for (JNIHandleBlock* current = _next; current != nullptr; current = current->_next) { - assert(current->_last == NULL, "only first block should have _last set"); - assert(current->_free_list == NULL, + assert(current->_last == nullptr, "only first block should have _last set"); + assert(current->_free_list == nullptr, "only first block should have _free_list set"); if (current->_top == 0) { // All blocks after the first clear trailing block are already cleared. 
#ifdef ASSERT - for (current = current->_next; current != NULL; current = current->_next) { + for (current = current->_next; current != nullptr; current = current->_next) { assert(current->_top == 0, "trailing blocks must already be cleared"); } #endif break; } current->zap(); } // Clear initial block - _free_list = NULL; + _free_list = nullptr; _allocate_before_rebuild = 0; _last = this; zap(); @@ -461,14 +461,14 @@ jobject JNIHandleBlock::allocate_handle(JavaThread* caller, oop obj, AllocFailTy } // Try free list - if (_free_list != NULL) { + if (_free_list != nullptr) { oop* handle = (oop*)_free_list; _free_list = (uintptr_t*) untag_free_list(*_free_list); *handle = obj; return (jobject) handle; } // Check if unused block follows last - if (_last->_next != NULL) { + if (_last->_next != nullptr) { // update last and retry _last = _last->_next; return allocate_handle(caller, obj, alloc_failmode); @@ -479,8 +479,8 @@ jobject JNIHandleBlock::allocate_handle(JavaThread* caller, oop obj, AllocFailTy rebuild_free_list(); // updates _allocate_before_rebuild counter } else { _last->_next = JNIHandleBlock::allocate_block(caller, alloc_failmode); - if (_last->_next == NULL) { - return NULL; + if (_last->_next == nullptr) { + return nullptr; } _last = _last->_next; _allocate_before_rebuild--; @@ -489,15 +489,15 @@ jobject JNIHandleBlock::allocate_handle(JavaThread* caller, oop obj, AllocFailTy } void JNIHandleBlock::rebuild_free_list() { - assert(_allocate_before_rebuild == 0 && _free_list == NULL, "just checking"); + assert(_allocate_before_rebuild == 0 && _free_list == nullptr, "just checking"); int free = 0; int blocks = 0; - for (JNIHandleBlock* current = this; current != NULL; current = current->_next) { + for (JNIHandleBlock* current = this; current != nullptr; current = current->_next) { for (int index = 0; index < current->_top; index++) { uintptr_t* handle = &(current->_handles)[index]; if (*handle == 0) { // this handle was cleared out by a delete call, reuse it - *handle = _free_list == NULL ? 0 : tag_free_list((uintptr_t)_free_list); + *handle = _free_list == nullptr ? 0 : tag_free_list((uintptr_t)_free_list); _free_list = handle; free++; } @@ -524,7 +524,7 @@ bool JNIHandleBlock::contains(jobject handle) const { bool JNIHandleBlock::chain_contains(jobject handle) const { - for (JNIHandleBlock* current = (JNIHandleBlock*) this; current != NULL; current = current->_next) { + for (JNIHandleBlock* current = (JNIHandleBlock*) this; current != nullptr; current = current->_next) { if (current->contains(handle)) { return true; } diff --git a/src/hotspot/share/runtime/jniHandles.hpp b/src/hotspot/share/runtime/jniHandles.hpp index 367beca660005..fe2defe236073 100644 --- a/src/hotspot/share/runtime/jniHandles.hpp +++ b/src/hotspot/share/runtime/jniHandles.hpp @@ -112,13 +112,13 @@ class JNIHandles : AllStatic { static void print_on(outputStream* st); static void print(); static void verify(); - // The category predicates all require handle != NULL. + // The category predicates all require handle != nullptr. static bool is_local_handle(JavaThread* thread, jobject handle); static bool is_frame_handle(JavaThread* thread, jobject handle); static bool is_global_handle(jobject handle); static bool is_weak_global_handle(jobject handle); - // precondition: handle != NULL. + // precondition: handle != nullptr.
static jobjectRefType handle_type(JavaThread* thread, jobject handle); // Garbage collection support(global handles only, local handles are traversed from thread) @@ -170,8 +170,8 @@ class JNIHandleBlock : public CHeapObj<mtInternal> { jobject allocate_handle(JavaThread* caller, oop obj, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM); // Block allocation and block free list management - static JNIHandleBlock* allocate_block(JavaThread* thread = NULL, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM); - static void release_block(JNIHandleBlock* block, JavaThread* thread = NULL); + static JNIHandleBlock* allocate_block(JavaThread* thread = nullptr, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM); + static void release_block(JNIHandleBlock* block, JavaThread* thread = nullptr); // JNI PushLocalFrame/PopLocalFrame support JNIHandleBlock* pop_frame_link() const { return _pop_frame_link; } diff --git a/src/hotspot/share/runtime/jniHandles.inline.hpp b/src/hotspot/share/runtime/jniHandles.inline.hpp index e77264fcd16a6..052ab138fef25 100644 --- a/src/hotspot/share/runtime/jniHandles.inline.hpp +++ b/src/hotspot/share/runtime/jniHandles.inline.hpp @@ -69,7 +69,7 @@ inline oop* JNIHandles::weak_global_ptr(jweak handle) { // external_guard is true if called from resolve_external_guard. template <DecoratorSet decorators, bool external_guard> inline oop JNIHandles::resolve_impl(jobject handle) { - assert(handle != NULL, "precondition"); + assert(handle != nullptr, "precondition"); assert(!current_thread_in_native(), "must not be in native"); oop result; if (is_weak_global_tagged(handle)) { // Unlikely @@ -78,27 +78,27 @@ inline oop JNIHandles::resolve_impl(jobject handle) { result = NativeAccess<decorators>::oop_load(global_ptr(handle)); // Construction of jobjects canonicalize a null value into a null // jobject, so for non-jweak the pointee should never be null. - assert(external_guard || result != NULL, "Invalid JNI handle"); + assert(external_guard || result != nullptr, "Invalid JNI handle"); } else { result = *local_ptr(handle); // Construction of jobjects canonicalize a null value into a null // jobject, so for non-jweak the pointee should never be null.
- assert(external_guard || result != NULL, "Invalid JNI handle"); + assert(external_guard || result != nullptr, "Invalid JNI handle"); } return result; } inline oop JNIHandles::resolve(jobject handle) { - oop result = NULL; - if (handle != NULL) { + oop result = nullptr; + if (handle != nullptr) { result = resolve_impl<DECORATORS_NONE, false>(handle); } return result; } inline oop JNIHandles::resolve_no_keepalive(jobject handle) { - oop result = NULL; - if (handle != NULL) { + oop result = nullptr; + if (handle != nullptr) { result = resolve_impl<AS_NO_KEEPALIVE, false>(handle); } return result; @@ -111,15 +111,15 @@ inline bool JNIHandles::is_same_object(jobject handle1, jobject handle2) { } inline oop JNIHandles::resolve_non_null(jobject handle) { - assert(handle != NULL, "JNI handle should not be null"); + assert(handle != nullptr, "JNI handle should not be null"); oop result = resolve_impl<DECORATORS_NONE, false>(handle); - assert(result != NULL, "NULL read from jni handle"); + assert(result != nullptr, "nullptr read from jni handle"); return result; } inline void JNIHandles::destroy_local(jobject handle) { - if (handle != NULL) { - *local_ptr(handle) = NULL; + if (handle != nullptr) { + *local_ptr(handle) = nullptr; } } diff --git a/src/hotspot/share/runtime/jniPeriodicChecker.cpp b/src/hotspot/share/runtime/jniPeriodicChecker.cpp index 96d16834608fb..273fe3f2c1689 100644 --- a/src/hotspot/share/runtime/jniPeriodicChecker.cpp +++ b/src/hotspot/share/runtime/jniPeriodicChecker.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,7 +39,7 @@ class JniPeriodicCheckerTask : public PeriodicTask { //---------------------------------------------------------- // Implementation of JniPeriodicChecker -JniPeriodicCheckerTask* JniPeriodicChecker::_task = NULL; +JniPeriodicCheckerTask* JniPeriodicChecker::_task = nullptr; /* * The engage() method is called at initialization time via diff --git a/src/hotspot/share/runtime/jniPeriodicChecker.hpp b/src/hotspot/share/runtime/jniPeriodicChecker.hpp index 4d6489117b2c7..367aa7a0854ae 100644 --- a/src/hotspot/share/runtime/jniPeriodicChecker.hpp +++ b/src/hotspot/share/runtime/jniPeriodicChecker.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2007, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -44,7 +44,7 @@ class JniPeriodicChecker : AllStatic { public: // Start/stop task static void engage(); - static bool is_active() { return _task != NULL; } + static bool is_active() { return _task != nullptr; } }; #endif // SHARE_RUNTIME_JNIPERIODICCHECKER_HPP diff --git a/src/hotspot/share/runtime/keepStackGCProcessed.cpp b/src/hotspot/share/runtime/keepStackGCProcessed.cpp index 06ab94dab3905..b90ea3628ca74 100644 --- a/src/hotspot/share/runtime/keepStackGCProcessed.cpp +++ b/src/hotspot/share/runtime/keepStackGCProcessed.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -39,7 +39,7 @@ KeepStackGCProcessedMark::KeepStackGCProcessedMark(JavaThread* jt) : return; } StackWatermark* our_watermark = StackWatermarkSet::get(JavaThread::current(), StackWatermarkKind::gc); - if (our_watermark == NULL) { + if (our_watermark == nullptr) { _active = false; return; } @@ -56,5 +56,5 @@ KeepStackGCProcessedMark::~KeepStackGCProcessedMark() { } void KeepStackGCProcessedMark::finish_processing() { - StackWatermarkSet::finish_processing(_jt, NULL /* context */, StackWatermarkKind::gc); + StackWatermarkSet::finish_processing(_jt, nullptr /* context */, StackWatermarkKind::gc); } diff --git a/src/hotspot/share/runtime/monitorChunk.cpp b/src/hotspot/share/runtime/monitorChunk.cpp index 32c5069087ef5..c54ad685cdbbb 100644 --- a/src/hotspot/share/runtime/monitorChunk.cpp +++ b/src/hotspot/share/runtime/monitorChunk.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ MonitorChunk::MonitorChunk(int number_on_monitors) { _number_of_monitors = number_on_monitors; _monitors = NEW_C_HEAP_ARRAY(BasicObjectLock, number_on_monitors, mtSynchronizer); - _next = NULL; + _next = nullptr; } diff --git a/src/hotspot/share/runtime/mutex.cpp b/src/hotspot/share/runtime/mutex.cpp index 0ba4d9e1ddeac..3fd7fbd8300a8 100644 --- a/src/hotspot/share/runtime/mutex.cpp +++ b/src/hotspot/share/runtime/mutex.cpp @@ -40,13 +40,13 @@ class InFlightMutexRelease { Mutex* _in_flight_mutex; public: InFlightMutexRelease(Mutex* in_flight_mutex) : _in_flight_mutex(in_flight_mutex) { - assert(in_flight_mutex != NULL, "must be"); + assert(in_flight_mutex != nullptr, "must be"); } void operator()(JavaThread* current) { _in_flight_mutex->release_for_safepoint(); - _in_flight_mutex = NULL; + _in_flight_mutex = nullptr; } - bool not_released() { return _in_flight_mutex != NULL; } + bool not_released() { return _in_flight_mutex != nullptr; } }; #ifdef ASSERT @@ -124,7 +124,7 @@ void Mutex::lock(Thread* self) { lock_contended(self); } - assert_owner(NULL); + assert_owner(nullptr); set_owner(self); } @@ -145,7 +145,7 @@ void Mutex::lock_without_safepoint_check(Thread * self) { check_rank(self); _lock.lock(); - assert_owner(NULL); + assert_owner(nullptr); set_owner(self); } @@ -171,7 +171,7 @@ bool Mutex::try_lock_inner(bool do_rank_checks) { check_block_state(self); if (_lock.try_lock()) { - assert_owner(NULL); + assert_owner(nullptr); set_owner(self); return true; } @@ -189,13 +189,13 @@ bool Mutex::try_lock_without_rank_check() { } void Mutex::release_for_safepoint() { - assert_owner(NULL); + assert_owner(nullptr); _lock.unlock(); } void Mutex::unlock() { DEBUG_ONLY(assert_owner(Thread::current())); - set_owner(NULL); + set_owner(nullptr); _lock.unlock(); } @@ -216,9 +216,9 @@ bool Monitor::wait_without_safepoint_check(uint64_t timeout) { assert_owner(self); check_rank(self); - // conceptually set the owner to NULL in anticipation of + // conceptually set the owner to null in anticipation of // abdicating the lock in wait - set_owner(NULL); + set_owner(nullptr); // Check safepoint state after resetting owner and possible NSV. 
check_no_safepoint_state(self); @@ -237,9 +237,9 @@ bool Monitor::wait(uint64_t timeout) { assert_owner(self); check_rank(self); - // conceptually set the owner to NULL in anticipation of + // conceptually set the owner to null in anticipation of // abdicating the lock in wait - set_owner(NULL); + set_owner(nullptr); // Check safepoint state after resetting owner and possible NSV. check_safepoint_state(self); @@ -256,7 +256,7 @@ bool Monitor::wait(uint64_t timeout) { if (ifmr.not_released()) { // Not unlocked by ~ThreadBlockInVMPreprocess - assert_owner(NULL); + assert_owner(nullptr); // Conceptually reestablish ownership of the lock. set_owner(self); } else { @@ -267,13 +267,13 @@ } Mutex::~Mutex() { - assert_owner(NULL); + assert_owner(nullptr); os::free(const_cast<char*>(_name)); } -Mutex::Mutex(Rank rank, const char * name, bool allow_vm_block) : _owner(NULL) { +Mutex::Mutex(Rank rank, const char * name, bool allow_vm_block) : _owner(nullptr) { assert(os::mutex_init_done(), "Too early!"); - assert(name != NULL, "Mutex requires a name"); + assert(name != nullptr, "Mutex requires a name"); _name = os::strdup(name, mtInternal); #ifdef ASSERT _allow_vm_block = allow_vm_block; @@ -363,7 +363,7 @@ void Mutex::print() const { #ifdef ASSERT void Mutex::assert_owner(Thread * expected) { const char* msg = "invalid owner"; - if (expected == NULL) { + if (expected == nullptr) { msg = "should be un-owned"; } else if (expected == Thread::current()) { @@ -376,7 +376,7 @@ void Mutex::assert_owner(Thread * expected) { Mutex* Mutex::get_least_ranked_lock(Mutex* locks) { Mutex *res, *tmp; - for (res = tmp = locks; tmp != NULL; tmp = tmp->next()) { + for (res = tmp = locks; tmp != nullptr; tmp = tmp->next()) { if (tmp->rank() < res->rank()) { res = tmp; } @@ -386,8 +386,8 @@ Mutex* Mutex::get_least_ranked_lock(Mutex* locks) { Mutex* Mutex::get_least_ranked_lock_besides_this(Mutex* locks) { Mutex *res, *tmp; - for (res = NULL, tmp = locks; tmp != NULL; tmp = tmp->next()) { - if (tmp != this && (res == NULL || tmp->rank() < res->rank())) { + for (res = nullptr, tmp = locks; tmp != nullptr; tmp = tmp->next()) { + if (tmp != this && (res == nullptr || tmp->rank() < res->rank())) { res = tmp; } } @@ -401,8 +401,8 @@ void Mutex::check_rank(Thread* thread) { // We expect the locks already acquired to be in increasing rank order, // modulo locks acquired in try_lock_without_rank_check() - for (Mutex* tmp = locks_owned; tmp != NULL; tmp = tmp->next()) { - if (tmp->next() != NULL) { + for (Mutex* tmp = locks_owned; tmp != nullptr; tmp = tmp->next()) { + if (tmp->next() != nullptr) { assert(tmp->rank() < tmp->next()->rank() || tmp->skip_rank_check(), "mutex rank anomaly?"); } @@ -416,7 +416,7 @@ void Mutex::check_rank(Thread* thread) { // able to check for safepoints first with a TBIVM. // For all threads, we enforce not holding the tty lock or below, since this could block progress also. // Also "this" should be the monitor with lowest rank owned by this thread. - if (least != NULL && ((least->rank() <= Mutex::nosafepoint && thread->is_Java_thread()) || + if (least != nullptr && ((least->rank() <= Mutex::nosafepoint && thread->is_Java_thread()) || least->rank() <= Mutex::tty || least->rank() <= this->rank())) { ResourceMark rm(thread); @@ -436,7 +436,7 @@ void Mutex::check_rank(Thread* thread) { // that the thread holds and m2 is the mutex the thread is trying // to acquire, then deadlock prevention rules require that the rank // of m2 be less than the rank of m1.
This prevents circular waits. - if (least != NULL && least->rank() <= this->rank()) { + if (least != nullptr && least->rank() <= this->rank()) { ResourceMark rm(thread); if (least->rank() > Mutex::tty) { // Printing owned locks acquires tty lock. If the least rank was below or equal @@ -461,15 +461,15 @@ void Mutex::set_owner_implementation(Thread *new_owner) { // It uses the Mutex::_owner, Mutex::_next, and // Thread::_owned_locks fields, and no other function // changes those fields. - // It is illegal to set the mutex from one non-NULL - // owner to another--it must be owned by NULL as an + // It is illegal to set the mutex from one non-null + // owner to another--it must be owned by null as an // intermediate state. - if (new_owner != NULL) { + if (new_owner != nullptr) { // the thread is acquiring this lock assert(new_owner == Thread::current(), "Should I be doing this?"); - assert(owner() == NULL, "setting the owner thread of an already owned mutex"); + assert(owner() == nullptr, "setting the owner thread of an already owned mutex"); raw_set_owner(new_owner); // set the owner // link "this" into the owned locks list @@ -490,30 +490,30 @@ void Mutex::set_owner_implementation(Thread *new_owner) { _last_owner = old_owner; _skip_rank_check = false; - assert(old_owner != NULL, "removing the owner thread of an unowned mutex"); + assert(old_owner != nullptr, "removing the owner thread of an unowned mutex"); assert(old_owner == Thread::current(), "removing the owner thread of an unowned mutex"); - raw_set_owner(NULL); // set the owner + raw_set_owner(nullptr); // set the owner Mutex* locks = old_owner->owned_locks(); // remove "this" from the owned locks list - Mutex* prev = NULL; + Mutex* prev = nullptr; bool found = false; - for (; locks != NULL; prev = locks, locks = locks->next()) { + for (; locks != nullptr; prev = locks, locks = locks->next()) { if (locks == this) { found = true; break; } } assert(found, "Removing a lock not owned"); - if (prev == NULL) { + if (prev == nullptr) { old_owner->_owned_locks = _next; } else { prev->_next = _next; } - _next = NULL; + _next = nullptr; // ~NSV implied with locking allow_vm_block flag. if (old_owner->is_Java_thread() && _allow_vm_block && this != tty_lock) { diff --git a/src/hotspot/share/runtime/mutex.hpp b/src/hotspot/share/runtime/mutex.hpp index c387e2898a504..3f38bcd5ed551 100644 --- a/src/hotspot/share/runtime/mutex.hpp +++ b/src/hotspot/share/runtime/mutex.hpp @@ -89,7 +89,7 @@ class Mutex : public CHeapObj<mtSynchronizer> { private: // The _owner field is only set by the current thread, either to itself after it has acquired - // the low-level _lock, or to NULL before it has released the _lock. Accesses by any thread other + // the low-level _lock, or to null before it has released the _lock. Accesses by any thread other // than the lock owner are inherently racy. Thread* volatile _owner; void raw_set_owner(Thread* new_owner) { Atomic::store(&_owner, new_owner); } @@ -168,7 +168,7 @@ class Mutex : public CHeapObj<mtSynchronizer> { void lock(); // prints out warning if VM thread blocks void lock(Thread *thread); // overloaded with current thread void unlock(); - bool is_locked() const { return owner() != NULL; } + bool is_locked() const { return owner() != nullptr; } bool try_lock(); // Like lock(), but unblocking.
It returns false instead private: diff --git a/src/hotspot/share/runtime/mutexLocker.cpp b/src/hotspot/share/runtime/mutexLocker.cpp index 6ee976fde6027..9e34f143b5159 100644 --- a/src/hotspot/share/runtime/mutexLocker.cpp +++ b/src/hotspot/share/runtime/mutexLocker.cpp @@ -42,126 +42,126 @@ // eliminating the indirection and using instances instead. // Consider using GCC's __read_mostly. -Mutex* Patching_lock = NULL; -Mutex* CompiledMethod_lock = NULL; -Monitor* SystemDictionary_lock = NULL; -Mutex* InvokeMethodTable_lock = NULL; -Mutex* SharedDictionary_lock = NULL; -Monitor* ClassInitError_lock = NULL; -Mutex* Module_lock = NULL; -Mutex* CompiledIC_lock = NULL; -Mutex* InlineCacheBuffer_lock = NULL; -Mutex* VMStatistic_lock = NULL; -Mutex* JmethodIdCreation_lock = NULL; -Mutex* JfieldIdCreation_lock = NULL; -Monitor* JNICritical_lock = NULL; -Mutex* JvmtiThreadState_lock = NULL; -Monitor* EscapeBarrier_lock = NULL; -Monitor* JvmtiVTMSTransition_lock = NULL; -Monitor* Heap_lock = NULL; +Mutex* Patching_lock = nullptr; +Mutex* CompiledMethod_lock = nullptr; +Monitor* SystemDictionary_lock = nullptr; +Mutex* InvokeMethodTable_lock = nullptr; +Mutex* SharedDictionary_lock = nullptr; +Monitor* ClassInitError_lock = nullptr; +Mutex* Module_lock = nullptr; +Mutex* CompiledIC_lock = nullptr; +Mutex* InlineCacheBuffer_lock = nullptr; +Mutex* VMStatistic_lock = nullptr; +Mutex* JmethodIdCreation_lock = nullptr; +Mutex* JfieldIdCreation_lock = nullptr; +Monitor* JNICritical_lock = nullptr; +Mutex* JvmtiThreadState_lock = nullptr; +Monitor* EscapeBarrier_lock = nullptr; +Monitor* JvmtiVTMSTransition_lock = nullptr; +Monitor* Heap_lock = nullptr; #ifdef INCLUDE_PARALLELGC -Mutex* PSOldGenExpand_lock = NULL; +Mutex* PSOldGenExpand_lock = nullptr; #endif -Mutex* AdapterHandlerLibrary_lock = NULL; -Mutex* SignatureHandlerLibrary_lock = NULL; -Mutex* VtableStubs_lock = NULL; -Mutex* SymbolArena_lock = NULL; -Monitor* StringDedup_lock = NULL; -Mutex* StringDedupIntern_lock = NULL; -Monitor* CodeCache_lock = NULL; -Mutex* TouchedMethodLog_lock = NULL; -Mutex* RetData_lock = NULL; -Monitor* VMOperation_lock = NULL; -Monitor* Threads_lock = NULL; -Mutex* NonJavaThreadsList_lock = NULL; -Mutex* NonJavaThreadsListSync_lock = NULL; -Monitor* CGC_lock = NULL; -Monitor* STS_lock = NULL; -Monitor* G1OldGCCount_lock = NULL; -Mutex* G1DetachedRefinementStats_lock = NULL; -Mutex* MarkStackFreeList_lock = NULL; -Mutex* MarkStackChunkList_lock = NULL; -Mutex* MonitoringSupport_lock = NULL; -Mutex* ParGCRareEvent_lock = NULL; -Monitor* ConcurrentGCBreakpoints_lock = NULL; -Mutex* Compile_lock = NULL; -Monitor* MethodCompileQueue_lock = NULL; -Monitor* CompileThread_lock = NULL; -Monitor* Compilation_lock = NULL; -Mutex* CompileTaskAlloc_lock = NULL; -Mutex* CompileStatistics_lock = NULL; -Mutex* DirectivesStack_lock = NULL; -Mutex* MultiArray_lock = NULL; -Monitor* Terminator_lock = NULL; -Monitor* InitCompleted_lock = NULL; -Monitor* BeforeExit_lock = NULL; -Monitor* Notify_lock = NULL; -Mutex* ExceptionCache_lock = NULL; +Mutex* AdapterHandlerLibrary_lock = nullptr; +Mutex* SignatureHandlerLibrary_lock = nullptr; +Mutex* VtableStubs_lock = nullptr; +Mutex* SymbolArena_lock = nullptr; +Monitor* StringDedup_lock = nullptr; +Mutex* StringDedupIntern_lock = nullptr; +Monitor* CodeCache_lock = nullptr; +Mutex* TouchedMethodLog_lock = nullptr; +Mutex* RetData_lock = nullptr; +Monitor* VMOperation_lock = nullptr; +Monitor* Threads_lock = nullptr; +Mutex* NonJavaThreadsList_lock = nullptr; +Mutex* NonJavaThreadsListSync_lock 
= nullptr; +Monitor* CGC_lock = nullptr; +Monitor* STS_lock = nullptr; +Monitor* G1OldGCCount_lock = nullptr; +Mutex* G1DetachedRefinementStats_lock = nullptr; +Mutex* MarkStackFreeList_lock = nullptr; +Mutex* MarkStackChunkList_lock = nullptr; +Mutex* MonitoringSupport_lock = nullptr; +Mutex* ParGCRareEvent_lock = nullptr; +Monitor* ConcurrentGCBreakpoints_lock = nullptr; +Mutex* Compile_lock = nullptr; +Monitor* MethodCompileQueue_lock = nullptr; +Monitor* CompileThread_lock = nullptr; +Monitor* Compilation_lock = nullptr; +Mutex* CompileTaskAlloc_lock = nullptr; +Mutex* CompileStatistics_lock = nullptr; +Mutex* DirectivesStack_lock = nullptr; +Mutex* MultiArray_lock = nullptr; +Monitor* Terminator_lock = nullptr; +Monitor* InitCompleted_lock = nullptr; +Monitor* BeforeExit_lock = nullptr; +Monitor* Notify_lock = nullptr; +Mutex* ExceptionCache_lock = nullptr; #ifndef PRODUCT -Mutex* FullGCALot_lock = NULL; +Mutex* FullGCALot_lock = nullptr; #endif -Mutex* tty_lock = NULL; +Mutex* tty_lock = nullptr; -Mutex* RawMonitor_lock = NULL; -Mutex* PerfDataMemAlloc_lock = NULL; -Mutex* PerfDataManager_lock = NULL; -Mutex* OopMapCacheAlloc_lock = NULL; +Mutex* RawMonitor_lock = nullptr; +Mutex* PerfDataMemAlloc_lock = nullptr; +Mutex* PerfDataManager_lock = nullptr; +Mutex* OopMapCacheAlloc_lock = nullptr; -Mutex* FreeList_lock = NULL; -Mutex* OldSets_lock = NULL; -Mutex* Uncommit_lock = NULL; -Monitor* RootRegionScan_lock = NULL; +Mutex* FreeList_lock = nullptr; +Mutex* OldSets_lock = nullptr; +Mutex* Uncommit_lock = nullptr; +Monitor* RootRegionScan_lock = nullptr; -Mutex* Management_lock = NULL; -Monitor* MonitorDeflation_lock = NULL; -Monitor* Service_lock = NULL; -Monitor* Notification_lock = NULL; -Monitor* PeriodicTask_lock = NULL; -Monitor* RedefineClasses_lock = NULL; -Mutex* Verify_lock = NULL; -Monitor* Zip_lock = NULL; +Mutex* Management_lock = nullptr; +Monitor* MonitorDeflation_lock = nullptr; +Monitor* Service_lock = nullptr; +Monitor* Notification_lock = nullptr; +Monitor* PeriodicTask_lock = nullptr; +Monitor* RedefineClasses_lock = nullptr; +Mutex* Verify_lock = nullptr; +Monitor* Zip_lock = nullptr; #if INCLUDE_JFR -Mutex* JfrStacktrace_lock = NULL; -Monitor* JfrMsg_lock = NULL; -Mutex* JfrBuffer_lock = NULL; -Monitor* JfrThreadSampler_lock = NULL; +Mutex* JfrStacktrace_lock = nullptr; +Monitor* JfrMsg_lock = nullptr; +Mutex* JfrBuffer_lock = nullptr; +Monitor* JfrThreadSampler_lock = nullptr; #endif #ifndef SUPPORTS_NATIVE_CX8 -Mutex* UnsafeJlong_lock = NULL; +Mutex* UnsafeJlong_lock = nullptr; #endif -Mutex* CodeHeapStateAnalytics_lock = NULL; +Mutex* CodeHeapStateAnalytics_lock = nullptr; -Monitor* ContinuationRelativize_lock = NULL; +Monitor* ContinuationRelativize_lock = nullptr; -Mutex* Metaspace_lock = NULL; -Monitor* MetaspaceCritical_lock = NULL; -Mutex* ClassLoaderDataGraph_lock = NULL; -Monitor* ThreadsSMRDelete_lock = NULL; -Mutex* ThreadIdTableCreate_lock = NULL; -Mutex* SharedDecoder_lock = NULL; -Mutex* DCmdFactory_lock = NULL; -Mutex* NMTQuery_lock = NULL; +Mutex* Metaspace_lock = nullptr; +Monitor* MetaspaceCritical_lock = nullptr; +Mutex* ClassLoaderDataGraph_lock = nullptr; +Monitor* ThreadsSMRDelete_lock = nullptr; +Mutex* ThreadIdTableCreate_lock = nullptr; +Mutex* SharedDecoder_lock = nullptr; +Mutex* DCmdFactory_lock = nullptr; +Mutex* NMTQuery_lock = nullptr; #if INCLUDE_CDS #if INCLUDE_JVMTI -Mutex* CDSClassFileStream_lock = NULL; +Mutex* CDSClassFileStream_lock = nullptr; #endif -Mutex* DumpTimeTable_lock = NULL; -Mutex* CDSLambda_lock = NULL; -Mutex* 
DumpRegion_lock = NULL; -Mutex* ClassListFile_lock = NULL; -Mutex* UnregisteredClassesTable_lock= NULL; -Mutex* LambdaFormInvokers_lock = NULL; -Mutex* ScratchObjects_lock = NULL; +Mutex* DumpTimeTable_lock = nullptr; +Mutex* CDSLambda_lock = nullptr; +Mutex* DumpRegion_lock = nullptr; +Mutex* ClassListFile_lock = nullptr; +Mutex* UnregisteredClassesTable_lock= nullptr; +Mutex* LambdaFormInvokers_lock = nullptr; +Mutex* ScratchObjects_lock = nullptr; #endif // INCLUDE_CDS -Mutex* Bootclasspath_lock = NULL; +Mutex* Bootclasspath_lock = nullptr; #if INCLUDE_JVMCI -Monitor* JVMCI_lock = NULL; -Monitor* JVMCIRuntime_lock = NULL; +Monitor* JVMCI_lock = nullptr; +Monitor* JVMCIRuntime_lock = nullptr; #endif @@ -172,7 +172,7 @@ static int _num_mutex; #ifdef ASSERT void assert_locked_or_safepoint(const Mutex* lock) { // check if this thread owns the lock (common case) - assert(lock != NULL, "Need non-NULL lock"); + assert(lock != nullptr, "Need non-null lock"); if (lock->owned_by_self()) return; if (SafepointSynchronize::is_at_safepoint()) return; if (!Universe::is_fully_initialized()) return; @@ -181,7 +181,7 @@ void assert_locked_or_safepoint(const Mutex* lock) { // a weaker assertion than the above void assert_locked_or_safepoint_weak(const Mutex* lock) { - assert(lock != NULL, "Need non-NULL lock"); + assert(lock != nullptr, "Need non-null lock"); if (lock->is_locked()) return; if (SafepointSynchronize::is_at_safepoint()) return; if (!Universe::is_fully_initialized()) return; @@ -190,7 +190,7 @@ void assert_locked_or_safepoint_weak(const Mutex* lock) { // a stronger assertion than the above void assert_lock_strong(const Mutex* lock) { - assert(lock != NULL, "Need non-NULL lock"); + assert(lock != nullptr, "Need non-null lock"); if (lock->owned_by_self()) return; fatal("must own lock %s", lock->name()); } @@ -405,7 +405,7 @@ void print_owned_locks_on_error(outputStream* st) { bool none = true; for (int i = 0; i < _num_mutex; i++) { // see if it has an owner - if (_mutex_array[i]->owner() != NULL) { + if (_mutex_array[i]->owner() != nullptr) { if (none) { // print format used by Mutex::print_on_error() st->print_cr(" ([mutex/lock_event])"); diff --git a/src/hotspot/share/runtime/mutexLocker.hpp b/src/hotspot/share/runtime/mutexLocker.hpp index e0d9dab2f9321..05c6e69b8c7f2 100644 --- a/src/hotspot/share/runtime/mutexLocker.hpp +++ b/src/hotspot/share/runtime/mutexLocker.hpp @@ -195,7 +195,7 @@ class MutexLocker: public StackObj { MutexLocker(Mutex* mutex, Mutex::SafepointCheckFlag flag = Mutex::_safepoint_check_flag) : _mutex(mutex) { bool no_safepoint_check = flag == Mutex::_no_safepoint_check_flag; - if (_mutex != NULL) { + if (_mutex != nullptr) { if (no_safepoint_check) { _mutex->lock_without_safepoint_check(); } else { @@ -207,7 +207,7 @@ class MutexLocker: public StackObj { MutexLocker(Thread* thread, Mutex* mutex, Mutex::SafepointCheckFlag flag = Mutex::_safepoint_check_flag) : _mutex(mutex) { bool no_safepoint_check = flag == Mutex::_no_safepoint_check_flag; - if (_mutex != NULL) { + if (_mutex != nullptr) { if (no_safepoint_check) { _mutex->lock_without_safepoint_check(thread); } else { @@ -217,7 +217,7 @@ class MutexLocker: public StackObj { } ~MutexLocker() { - if (_mutex != NULL) { + if (_mutex != nullptr) { assert_lock_strong(_mutex); _mutex->unlock(); } @@ -228,7 +228,7 @@ class MutexLocker: public StackObj { // A MonitorLocker is like a MutexLocker above, except it allows // wait/notify as well which are delegated to the underlying Monitor. -// It also disallows NULL. 
+// It also disallows null. class MonitorLocker: public MutexLocker { Mutex::SafepointCheckFlag _flag; @@ -242,13 +242,13 @@ class MonitorLocker: public MutexLocker { MonitorLocker(Monitor* monitor, Mutex::SafepointCheckFlag flag = Mutex::_safepoint_check_flag) : MutexLocker(monitor, flag), _flag(flag) { // Superclass constructor did locking - assert(monitor != NULL, "NULL monitor not allowed"); + assert(monitor != nullptr, "null monitor not allowed"); } MonitorLocker(Thread* thread, Monitor* monitor, Mutex::SafepointCheckFlag flag = Mutex::_safepoint_check_flag) : MutexLocker(thread, monitor, flag), _flag(flag) { // Superclass constructor did locking - assert(monitor != NULL, "NULL monitor not allowed"); + assert(monitor != nullptr, "null monitor not allowed"); } bool wait(int64_t timeout = 0) { diff --git a/src/hotspot/share/runtime/nonJavaThread.cpp b/src/hotspot/share/runtime/nonJavaThread.cpp index 053111304bf18..fa68c2be1d746 100644 --- a/src/hotspot/share/runtime/nonJavaThread.cpp +++ b/src/hotspot/share/runtime/nonJavaThread.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -47,7 +47,7 @@ class NonJavaThread::List { NonJavaThread* volatile _head; SingleWriterSynchronizer _protect; - List() : _head(NULL), _protect() {} + List() : _head(nullptr), _protect() {} }; NonJavaThread::List NonJavaThread::_the_list; @@ -66,8 +66,8 @@ void NonJavaThread::Iterator::step() { _current = Atomic::load_acquire(&_current->_next); } -NonJavaThread::NonJavaThread() : Thread(), _next(NULL) { - assert(BarrierSet::barrier_set() != NULL, "NonJavaThread created too soon!"); +NonJavaThread::NonJavaThread() : Thread(), _next(nullptr) { + assert(BarrierSet::barrier_set() != nullptr, "NonJavaThread created too soon!"); } NonJavaThread::~NonJavaThread() { } @@ -86,7 +86,7 @@ void NonJavaThread::remove_from_the_list() { // Cleanup BarrierSet-related data before removing from list. BarrierSet::barrier_set()->on_thread_detach(this); NonJavaThread* volatile* p = &_the_list._head; - for (NonJavaThread* t = *p; t != NULL; p = &t->_next, t = *p) { + for (NonJavaThread* t = *p; t != nullptr; p = &t->_next, t = *p) { if (t == this) { *p = _next; break; @@ -98,7 +98,7 @@ void NonJavaThread::remove_from_the_list() { // from NJTList_lock in case an iteration attempts to lock it. MutexLocker ml(NonJavaThreadsListSync_lock, Mutex::_no_safepoint_check_flag); _the_list._protect.synchronize(); - _next = NULL; // Safe to drop the link now. + _next = nullptr; // Safe to drop the link now. } void NonJavaThread::pre_run() { @@ -106,7 +106,7 @@ void NonJavaThread::pre_run() { // This is slightly odd in that NamedThread is a subclass, but // in fact name() is defined in Thread - assert(this->name() != NULL, "thread name was not set before it was started"); + assert(this->name() != nullptr, "thread name was not set before it was started"); this->set_native_thread_name(this->name()); } @@ -123,8 +123,8 @@ void NonJavaThread::post_run() { // uniquely named instances should derive from this. NamedThread::NamedThread() : NonJavaThread(), - _name(NULL), - _processed_thread(NULL), + _name(nullptr), + _processed_thread(nullptr), _gc_id(GCId::undefined()) {} @@ -133,7 +133,7 @@ NamedThread::~NamedThread() { } void NamedThread::set_name(const char* format, ...) 
{ - guarantee(_name == NULL, "Only get to set name once."); + guarantee(_name == nullptr, "Only get to set name once."); _name = NEW_C_HEAP_ARRAY(char, max_name_len, mtThread); va_list ap; va_start(ap, format); @@ -154,12 +154,12 @@ void NamedThread::print_on(outputStream* st) const { // be replaced by an abstraction over whatever native support for // timer interrupts exists on the platform. -WatcherThread* WatcherThread::_watcher_thread = NULL; +WatcherThread* WatcherThread::_watcher_thread = nullptr; bool WatcherThread::_startable = false; volatile bool WatcherThread::_should_terminate = false; WatcherThread::WatcherThread() : NonJavaThread() { - assert(watcher_thread() == NULL, "we can only allocate one WatcherThread"); + assert(watcher_thread() == nullptr, "we can only allocate one WatcherThread"); if (os::create_thread(this, os::watcher_thread)) { _watcher_thread = this; @@ -285,7 +285,7 @@ void WatcherThread::run() { // Signal that it is terminated { MutexLocker mu(Terminator_lock, Mutex::_no_safepoint_check_flag); - _watcher_thread = NULL; + _watcher_thread = nullptr; Terminator_lock->notify_all(); } } @@ -293,7 +293,7 @@ void WatcherThread::run() { void WatcherThread::start() { assert(PeriodicTask_lock->owned_by_self(), "PeriodicTask_lock required"); - if (watcher_thread() == NULL && _startable) { + if (watcher_thread() == nullptr && _startable) { _should_terminate = false; // Create the single instance of WatcherThread new WatcherThread(); @@ -313,7 +313,7 @@ void WatcherThread::stop() { _should_terminate = true; WatcherThread* watcher = watcher_thread(); - if (watcher != NULL) { + if (watcher != nullptr) { // unpark the WatcherThread so it can see that it should terminate watcher->unpark(); } @@ -321,7 +321,7 @@ void WatcherThread::stop() { MonitorLocker mu(Terminator_lock); - while (watcher_thread() != NULL) { + while (watcher_thread() != nullptr) { // This wait should make safepoint checks and wait without a timeout. mu.wait(0); } diff --git a/src/hotspot/share/runtime/nonJavaThread.hpp b/src/hotspot/share/runtime/nonJavaThread.hpp index 5eb26e429241a..825fd48485ca6 100644 --- a/src/hotspot/share/runtime/nonJavaThread.hpp +++ b/src/hotspot/share/runtime/nonJavaThread.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -65,7 +65,7 @@ class NonJavaThread::Iterator : public StackObj { Iterator(); ~Iterator(); - bool end() const { return _current == NULL; } + bool end() const { return _current == nullptr; } NonJavaThread* current() const { return _current; } void step(); }; @@ -91,7 +91,7 @@ class NamedThread: public NonJavaThread { // May only be called once per thread. void set_name(const char* format, ...) ATTRIBUTE_PRINTF(2, 3); virtual bool is_Named_thread() const { return true; } - virtual const char* name() const { return _name == NULL ? "Unknown Thread" : _name; } + virtual const char* name() const { return _name == nullptr ? 
"Unknown Thread" : _name; } virtual const char* type_name() const { return "NamedThread"; } Thread *processed_thread() { return _processed_thread; } void set_processed_thread(Thread *thread) { _processed_thread = thread; } diff --git a/src/hotspot/share/runtime/objectMonitor.cpp b/src/hotspot/share/runtime/objectMonitor.cpp index 72c53107e46f5..ea539be29288b 100644 --- a/src/hotspot/share/runtime/objectMonitor.cpp +++ b/src/hotspot/share/runtime/objectMonitor.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -66,11 +66,11 @@ #define DTRACE_MONITOR_PROBE_COMMON(obj, thread) \ - char* bytes = NULL; \ + char* bytes = nullptr; \ int len = 0; \ jlong jtid = SharedRuntime::get_java_tid(thread); \ Symbol* klassname = obj->klass()->name(); \ - if (klassname != NULL) { \ + if (klassname != nullptr) { \ bytes = (char*)klassname->bytes(); \ len = klassname->utf8_length(); \ } @@ -121,7 +121,7 @@ static int Knob_PreSpin = 10; // 20-100 likely better DEBUG_ONLY(static volatile bool InitDone = false;) -OopStorage* ObjectMonitor::_oop_storage = NULL; +OopStorage* ObjectMonitor::_oop_storage = nullptr; // ----------------------------------------------------------------------------- // Theory of operations -- Monitors lists, thread residency, etc: @@ -259,18 +259,18 @@ static void check_object_context() { ObjectMonitor::ObjectMonitor(oop object) : _header(markWord::zero()), _object(_oop_storage, object), - _owner(NULL), + _owner(nullptr), _previous_owner_tid(0), - _next_om(NULL), + _next_om(nullptr), _recursions(0), - _EntryList(NULL), - _cxq(NULL), - _succ(NULL), - _Responsible(NULL), + _EntryList(nullptr), + _cxq(nullptr), + _succ(nullptr), + _Responsible(nullptr), _Spinner(0), _SpinDuration(ObjectMonitor::Knob_SpinLimit), _contentions(0), - _WaitSet(NULL), + _WaitSet(nullptr), _waiters(0), _WaitSetLock(0) { } @@ -282,14 +282,14 @@ ObjectMonitor::~ObjectMonitor() { oop ObjectMonitor::object() const { check_object_context(); if (_object.is_null()) { - return NULL; + return nullptr; } return _object.resolve(); } oop ObjectMonitor::object_peek() const { if (_object.is_null()) { - return NULL; + return nullptr; } return _object.peek(); } @@ -297,7 +297,7 @@ oop ObjectMonitor::object_peek() const { void ObjectMonitor::ExitOnSuspend::operator()(JavaThread* current) { if (current->is_suspended()) { _om->_recursions = 0; - _om->_succ = NULL; + _om->_succ = nullptr; // Don't need a full fence after clearing successor here because of the call to exit(). _om->exit(current, false /* not_suspended */); _om_exited = true; @@ -309,7 +309,7 @@ void ObjectMonitor::ExitOnSuspend::operator()(JavaThread* current) { void ObjectMonitor::ClearSuccOnSuspend::operator()(JavaThread* current) { if (current->is_suspended()) { if (_om->_succ == current) { - _om->_succ = NULL; + _om->_succ = nullptr; OrderAccess::fence(); // always do a full fence when successor is cleared } } @@ -322,8 +322,8 @@ bool ObjectMonitor::enter(JavaThread* current) { // The following code is ordered to check the most common cases first // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors. 
- void* cur = try_set_owner_from(NULL, current); - if (cur == NULL) { + void* cur = try_set_owner_from(nullptr, current); + if (cur == nullptr) { assert(_recursions == 0, "invariant"); return true; } @@ -373,7 +373,7 @@ bool ObjectMonitor::enter(JavaThread* current) { // above lost the race to async deflation. Undo the work and // force the caller to retry. const oop l_object = object(); - if (l_object != NULL) { + if (l_object != nullptr) { // Attempt to restore the header/dmw to the object's header so that // we only retry once if the deflater thread happens to be slow. install_displaced_markword_in_object(l_object); @@ -396,7 +396,7 @@ bool ObjectMonitor::enter(JavaThread* current) { { // Change java thread status to indicate blocked on monitor enter. JavaThreadBlockedOnMonitorEnterState jtbmes(current, this); - assert(current->current_pending_monitor() == NULL, "invariant"); + assert(current->current_pending_monitor() == nullptr, "invariant"); current->set_current_pending_monitor(this); DTRACE_MONITOR_PROBE(contended__enter, this, object(), current); @@ -419,7 +419,7 @@ bool ObjectMonitor::enter(JavaThread* current) { { ThreadBlockInVMPreprocess<ExitOnSuspend> tbivs(current, eos, true /* allow_suspend */); EnterI(current); - current->set_current_pending_monitor(NULL); + current->set_current_pending_monitor(nullptr); // We can go to a safepoint at the end of this block. If we // do a thread dump during that safepoint, then this thread will show // as having "-locked" the monitor, but the OS and java.lang.Thread @@ -484,8 +484,8 @@ bool ObjectMonitor::enter(JavaThread* current) { int ObjectMonitor::TryLock(JavaThread* current) { void* own = owner_raw(); - if (own != NULL) return 0; - if (try_set_owner_from(NULL, current) == NULL) { + if (own != nullptr) return 0; + if (try_set_owner_from(nullptr, current) == nullptr) { assert(_recursions == 0, "invariant"); return 1; } @@ -527,20 +527,20 @@ bool ObjectMonitor::deflate_monitor() { const oop obj = object_peek(); - if (obj == NULL) { + if (obj == nullptr) { // If the object died, we can recycle the monitor without racing with // Java threads. The GC already broke the association with the object. - set_owner_from(NULL, DEFLATER_MARKER); + set_owner_from(nullptr, DEFLATER_MARKER); assert(contentions() >= 0, "must be non-negative: contentions=%d", contentions()); _contentions = INT_MIN; // minimum negative int } else { // Attempt async deflation protocol. - // Set a NULL owner to DEFLATER_MARKER to force any contending thread + // Set a nullptr owner to DEFLATER_MARKER to force any contending thread // through the slow path. This is just the first part of the async // deflation dance. - if (try_set_owner_from(NULL, DEFLATER_MARKER) != NULL) { - // The owner field is no longer NULL so we lost the race since the + if (try_set_owner_from(nullptr, DEFLATER_MARKER) != nullptr) { + // The owner field is no longer null so we lost the race since the // ObjectMonitor is now busy. return false; } @@ -549,8 +549,8 @@ bool ObjectMonitor::deflate_monitor() { // Another thread has raced to enter the ObjectMonitor after // is_busy() above or has already entered and waited on // it which makes it busy so no deflation. Restore owner to - // NULL if it is still DEFLATER_MARKER. - if (try_set_owner_from(DEFLATER_MARKER, NULL) != DEFLATER_MARKER) { + // null if it is still DEFLATER_MARKER. + if (try_set_owner_from(DEFLATER_MARKER, nullptr) != DEFLATER_MARKER) { // Deferred decrement for the JT EnterI() that cancelled the async deflation.
add_to_contentions(-1); } @@ -561,9 +561,9 @@ bool ObjectMonitor::deflate_monitor() { // to retry. This is the second part of the async deflation dance. if (Atomic::cmpxchg(&_contentions, 0, INT_MIN) != 0) { // Contentions was no longer 0 so we lost the race since the - // ObjectMonitor is now busy. Restore owner to NULL if it is + // ObjectMonitor is now busy. Restore owner to nullptr if it is // still DEFLATER_MARKER: - if (try_set_owner_from(DEFLATER_MARKER, NULL) != DEFLATER_MARKER) { + if (try_set_owner_from(DEFLATER_MARKER, nullptr) != DEFLATER_MARKER) { // Deferred decrement for the JT EnterI() that cancelled the async deflation. add_to_contentions(-1); } @@ -576,13 +576,13 @@ bool ObjectMonitor::deflate_monitor() { guarantee(contentions() < 0, "must be negative: contentions=%d", contentions()); guarantee(_waiters == 0, "must be 0: waiters=%d", _waiters); - guarantee(_cxq == NULL, "must be no contending threads: cxq=" + guarantee(_cxq == nullptr, "must be no contending threads: cxq=" INTPTR_FORMAT, p2i(_cxq)); - guarantee(_EntryList == NULL, + guarantee(_EntryList == nullptr, "must be no entering threads: EntryList=" INTPTR_FORMAT, p2i(_EntryList)); - if (obj != NULL) { + if (obj != nullptr) { if (log_is_enabled(Trace, monitorinflation)) { ResourceMark rm; log_trace(monitorinflation)("deflate_monitor: object=" INTPTR_FORMAT @@ -611,7 +611,7 @@ void ObjectMonitor::install_displaced_markword_in_object(const oop obj) { // those values could change when the ObjectMonitor gets moved from // the global free list to a per-thread free list. - guarantee(obj != NULL, "must be non-NULL"); + guarantee(obj != nullptr, "must be non-null"); // Separate loads in is_being_async_deflated(), which is almost always // called before this function, from the load of dmw/header below. @@ -621,7 +621,7 @@ void ObjectMonitor::install_displaced_markword_in_object(const oop obj) { OrderAccess::loadload_for_IRIW(); const oop l_object = object_peek(); - if (l_object == NULL) { + if (l_object == nullptr) { // ObjectMonitor's object ref has already been cleared by async // deflation or GC so we're done here. return; @@ -630,7 +630,7 @@ void ObjectMonitor::install_displaced_markword_in_object(const oop obj) { INTPTR_FORMAT, p2i(l_object), p2i(obj)); markWord dmw = header(); - // The dmw has to be neutral (not NULL, not locked and not marked). + // The dmw has to be neutral (not null, not locked and not marked). assert(dmw.is_neutral(), "must be neutral: dmw=" INTPTR_FORMAT, dmw.value()); // Install displaced mark word if the object's header still points @@ -666,7 +666,7 @@ const char* ObjectMonitor::is_busy_to_string(stringStream* ss) { if (!owner_is_DEFLATER_MARKER()) { ss->print("owner=" INTPTR_FORMAT, p2i(owner_raw())); } else { - // We report NULL instead of DEFLATER_MARKER here because is_busy() + // We report nullptr instead of DEFLATER_MARKER here because is_busy() // ignores DEFLATER_MARKER values. ss->print("owner=" INTPTR_FORMAT, NULL_WORD); } @@ -783,7 +783,7 @@ void ObjectMonitor::EnterI(JavaThread* current) { // timer scalability issues we see on some platforms as we'd only have one thread // -- the checker -- parked on a timer. - if (nxt == NULL && _EntryList == NULL) { + if (nxt == nullptr && _EntryList == nullptr) { // Try to assume the role of responsible thread for the monitor. // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=current } Atomic::replace_if_null(&_Responsible, current); @@ -861,7 +861,7 @@ void ObjectMonitor::EnterI(JavaThread* current) { // just spin again. 
This pattern can repeat, leaving _succ to simply // spin on a CPU. - if (_succ == current) _succ = NULL; + if (_succ == current) _succ = nullptr; // Invariant: after clearing _succ a thread *must* retry _owner before parking. OrderAccess::fence(); @@ -878,11 +878,11 @@ void ObjectMonitor::EnterI(JavaThread* current) { assert(owner_raw() == current, "invariant"); UnlinkAfterAcquire(current, &node); - if (_succ == current) _succ = NULL; + if (_succ == current) _succ = nullptr; assert(_succ != current, "invariant"); if (_Responsible == current) { - _Responsible = NULL; + _Responsible = nullptr; OrderAccess::fence(); // Dekker pivot-point // We may leave threads on cxq|EntryList without a designated @@ -938,8 +938,8 @@ void ObjectMonitor::EnterI(JavaThread* current) { // In the future we should reconcile EnterI() and ReenterI(). void ObjectMonitor::ReenterI(JavaThread* current, ObjectWaiter* currentNode) { - assert(current != NULL, "invariant"); - assert(currentNode != NULL, "invariant"); + assert(current != nullptr, "invariant"); + assert(currentNode != nullptr, "invariant"); assert(currentNode->_thread == current, "invariant"); assert(_waiters > 0, "invariant"); assert(object()->mark() == markWord::encode(this), "invariant"); @@ -981,7 +981,7 @@ void ObjectMonitor::ReenterI(JavaThread* current, ObjectWaiter* currentNode) { // Assuming this is not a spurious wakeup we'll normally // find that _succ == current. - if (_succ == current) _succ = NULL; + if (_succ == current) _succ = nullptr; // Invariant: after clearing _succ a contending thread // *must* retry _owner before parking. @@ -1003,7 +1003,7 @@ void ObjectMonitor::ReenterI(JavaThread* current, ObjectWaiter* currentNode) { assert(owner_raw() == current, "invariant"); assert(object()->mark() == markWord::encode(this), "invariant"); UnlinkAfterAcquire(current, currentNode); - if (_succ == current) _succ = NULL; + if (_succ == current) _succ = nullptr; assert(_succ != current, "invariant"); currentNode->TState = ObjectWaiter::TS_RUN; OrderAccess::fence(); // see comments at the end of EnterI() @@ -1022,11 +1022,11 @@ void ObjectMonitor::UnlinkAfterAcquire(JavaThread* current, ObjectWaiter* curren // This is a constant-time operation. ObjectWaiter* nxt = currentNode->_next; ObjectWaiter* prv = currentNode->_prev; - if (nxt != NULL) nxt->_prev = prv; - if (prv != NULL) prv->_next = nxt; + if (nxt != nullptr) nxt->_prev = prv; + if (prv != nullptr) prv->_next = nxt; if (currentNode == _EntryList) _EntryList = nxt; - assert(nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant"); - assert(prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant"); + assert(nxt == nullptr || nxt->TState == ObjectWaiter::TS_ENTER, "invariant"); + assert(prv == nullptr || prv->TState == ObjectWaiter::TS_ENTER, "invariant"); } else { assert(currentNode->TState == ObjectWaiter::TS_CXQ, "invariant"); // Inopportune interleaving -- current is still on the cxq. @@ -1043,7 +1043,7 @@ void ObjectMonitor::UnlinkAfterAcquire(JavaThread* current, ObjectWaiter* curren // so it might as well be now. ObjectWaiter* v = _cxq; - assert(v != NULL, "invariant"); + assert(v != nullptr, "invariant"); if (v != currentNode || Atomic::cmpxchg(&_cxq, v, currentNode->_next) != v) { // The CAS above can fail from interference IFF a "RAT" arrived. 
// In that case current must be in the interior and can no longer be @@ -1053,15 +1053,15 @@ void ObjectMonitor::UnlinkAfterAcquire(JavaThread* current, ObjectWaiter* curren v = _cxq; // CAS above failed - start scan at head of list } ObjectWaiter* p; - ObjectWaiter* q = NULL; - for (p = v; p != NULL && p != currentNode; p = p->_next) { + ObjectWaiter* q = nullptr; + for (p = v; p != nullptr && p != currentNode; p = p->_next) { q = p; assert(p->TState == ObjectWaiter::TS_CXQ, "invariant"); } assert(v != currentNode, "invariant"); assert(p == currentNode, "Node not found on cxq"); assert(p != _cxq, "invariant"); - assert(q != NULL, "invariant"); + assert(q != nullptr, "invariant"); assert(q->_next == p, "invariant"); q->_next = p->_next; } @@ -1168,7 +1168,7 @@ void ObjectMonitor::exit(JavaThread* current, bool not_suspended) { // Invariant: after setting Responsible=null an thread must execute // a MEMBAR or other serializing instruction before fetching EntryList|cxq. - _Responsible = NULL; + _Responsible = nullptr; #if INCLUDE_JFR // get the owner's thread id for the MonitorEnter event @@ -1190,14 +1190,14 @@ void ObjectMonitor::exit(JavaThread* current, bool not_suspended) { release_clear_owner(current); OrderAccess::storeload(); - if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) { + if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != nullptr) { return; } // Other threads are blocked trying to acquire the lock. // Normally the exiting thread is responsible for ensuring succession, // but if other successors are ready or other entering threads are spinning - // then this thread can simply store NULL into _owner and exit without + // then this thread can simply store null into _owner and exit without // waking a successor. The existence of spinners or ready successors // guarantees proper succession (liveness). Responsibility passes to the // ready or running successors. The exiting thread delegates the duty. @@ -1231,20 +1231,20 @@ void ObjectMonitor::exit(JavaThread* current, bool not_suspended) { // to reacquire the lock the responsibility for ensuring succession // falls to the new owner. // - if (try_set_owner_from(NULL, current) != NULL) { + if (try_set_owner_from(nullptr, current) != nullptr) { return; } guarantee(owner_raw() == current, "invariant"); - ObjectWaiter* w = NULL; + ObjectWaiter* w = nullptr; w = _EntryList; - if (w != NULL) { + if (w != nullptr) { // I'd like to write: guarantee (w->_thread != current). // But in practice an exiting thread may find itself on the EntryList. // Let's say thread T1 calls O.wait(). Wait() enqueues T1 on O's waitset and - // then calls exit(). Exit release the lock by setting O._owner to NULL. + // then calls exit(). Exit release the lock by setting O._owner to null. // Let's say T1 then stalls. T2 acquires O and calls O.notify(). The // notify() operation moves T1 from O's waitset to O's EntryList. T2 then // release the lock "O". T2 resumes immediately after the ST of null into @@ -1260,20 +1260,20 @@ void ObjectMonitor::exit(JavaThread* current, bool not_suspended) { // If we find that both _cxq and EntryList are null then just // re-run the exit protocol from the top. w = _cxq; - if (w == NULL) continue; + if (w == nullptr) continue; // Drain _cxq into EntryList - bulk transfer. // First, detach _cxq. 
- // The following loop is tantamount to: w = swap(&cxq, NULL) + // The following loop is tantamount to: w = swap(&cxq, nullptr) for (;;) { - assert(w != NULL, "Invariant"); - ObjectWaiter* u = Atomic::cmpxchg(&_cxq, w, (ObjectWaiter*)NULL); + assert(w != nullptr, "Invariant"); + ObjectWaiter* u = Atomic::cmpxchg(&_cxq, w, (ObjectWaiter*)nullptr); if (u == w) break; w = u; } - assert(w != NULL, "invariant"); - assert(_EntryList == NULL, "invariant"); + assert(w != nullptr, "invariant"); + assert(_EntryList == nullptr, "invariant"); // Convert the LIFO SLL anchored by _cxq into a DLL. // The list reorganization step operates in O(LENGTH(w)) time. @@ -1285,25 +1285,25 @@ void ObjectMonitor::exit(JavaThread* current, bool not_suspended) { // we have faster access to the tail. _EntryList = w; - ObjectWaiter* q = NULL; + ObjectWaiter* q = nullptr; ObjectWaiter* p; - for (p = w; p != NULL; p = p->_next) { + for (p = w; p != nullptr; p = p->_next) { guarantee(p->TState == ObjectWaiter::TS_CXQ, "Invariant"); p->TState = ObjectWaiter::TS_ENTER; p->_prev = q; q = p; } - // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = NULL + // In 1-0 mode we need: ST EntryList; MEMBAR #storestore; ST _owner = nullptr // The MEMBAR is satisfied by the release_store() operation in ExitEpilog(). // See if we can abdicate to a spinner instead of waking a thread. // A primary goal of the implementation is to reduce the // context-switch rate. - if (_succ != NULL) continue; + if (_succ != nullptr) continue; w = _EntryList; - if (w != NULL) { + if (w != nullptr) { guarantee(w->TState == ObjectWaiter::TS_ENTER, "invariant"); ExitEpilog(current, w); return; @@ -1317,16 +1317,16 @@ void ObjectMonitor::ExitEpilog(JavaThread* current, ObjectWaiter* Wakee) { // Exit protocol: // 1. ST _succ = wakee // 2. membar #loadstore|#storestore; - // 2. ST _owner = NULL + // 2. ST _owner = nullptr // 3. unpark(wakee) _succ = Wakee->_thread; ParkEvent * Trigger = Wakee->_event; - // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again. + // Hygiene -- once we've set _owner = nullptr we can't safely dereference Wakee again. // The thread associated with Wakee may have grabbed the lock and "Wakee" may be // out-of-scope (non-extant). - Wakee = NULL; + Wakee = nullptr; // Drop the lock. // Uses a fence to separate release_store(owner) from the LD in unpark(). @@ -1427,8 +1427,8 @@ static void post_monitor_wait_event(EventJavaMonitorWait* event, uint64_t notifier_tid, jlong timeout, bool timedout) { - assert(event != NULL, "invariant"); - assert(monitor != NULL, "invariant"); + assert(event != nullptr, "invariant"); + assert(monitor != nullptr, "invariant"); const Klass* monitor_klass = monitor->object()->klass(); if (is_excluded(monitor_klass)) { return; @@ -1504,7 +1504,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) { AddWaiter(&node); Thread::SpinRelease(&_WaitSetLock); - _Responsible = NULL; + _Responsible = nullptr; intx save = _recursions; // record the old recursion count _waiters++; // increment the number of waiters @@ -1576,7 +1576,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) { // No other threads will asynchronously modify TState. guarantee(node.TState != ObjectWaiter::TS_WAIT, "invariant"); OrderAccess::loadload(); - if (_succ == current) _succ = NULL; + if (_succ == current) _succ = nullptr; WasNotified = node._notified; // Reentry phase -- reacquire the monitor. 
@@ -1638,7 +1638,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) { assert(_succ != current, "invariant"); } // OSThreadWaitState() - current->set_current_waiting_monitor(NULL); + current->set_current_waiting_monitor(nullptr); guarantee(_recursions == 0, "invariant"); int relock_count = JvmtiDeferredUpdates::get_and_reset_relock_count_after_wait(current); @@ -1674,7 +1674,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) { void ObjectMonitor::INotify(JavaThread* current) { Thread::SpinAcquire(&_WaitSetLock, "WaitSet - notify"); ObjectWaiter* iterator = DequeueWaiter(); - if (iterator != NULL) { + if (iterator != nullptr) { guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant"); guarantee(iterator->_notified == 0, "invariant"); // Disposition - what might we do with iterator ? @@ -1689,15 +1689,15 @@ void ObjectMonitor::INotify(JavaThread* current) { iterator->_notifier_tid = JFR_THREAD_ID(current); ObjectWaiter* list = _EntryList; - if (list != NULL) { - assert(list->_prev == NULL, "invariant"); + if (list != nullptr) { + assert(list->_prev == nullptr, "invariant"); assert(list->TState == ObjectWaiter::TS_ENTER, "invariant"); assert(list != iterator, "invariant"); } // prepend to cxq - if (list == NULL) { - iterator->_next = iterator->_prev = NULL; + if (list == nullptr) { + iterator->_next = iterator->_prev = nullptr; _EntryList = iterator; } else { iterator->TState = ObjectWaiter::TS_CXQ; @@ -1736,7 +1736,7 @@ void ObjectMonitor::INotify(JavaThread* current) { void ObjectMonitor::notify(TRAPS) { JavaThread* current = THREAD; CHECK_OWNER(); // Throws IMSE if not owner. - if (_WaitSet == NULL) { + if (_WaitSet == nullptr) { return; } DTRACE_MONITOR_PROBE(notify, this, object(), current); @@ -1755,13 +1755,13 @@ void ObjectMonitor::notify(TRAPS) { void ObjectMonitor::notifyAll(TRAPS) { JavaThread* current = THREAD; CHECK_OWNER(); // Throws IMSE if not owner. - if (_WaitSet == NULL) { + if (_WaitSet == nullptr) { return; } DTRACE_MONITOR_PROBE(notifyAll, this, object(), current); int tally = 0; - while (_WaitSet != NULL) { + while (_WaitSet != nullptr) { tally++; INotify(current); } @@ -1887,10 +1887,10 @@ int ObjectMonitor::TrySpin(JavaThread* current) { // CONSIDER: use Prefetch::write() to avoid RTS->RTO upgrades // when preparing to LD...CAS _owner, etc and the CAS is likely // to succeed. - if (_succ == NULL) { + if (_succ == nullptr) { _succ = current; } - Thread* prv = NULL; + Thread* prv = nullptr; // There are three ways to exit the following loop: // 1. A successful spin where this thread has acquired the lock. @@ -1929,13 +1929,13 @@ int ObjectMonitor::TrySpin(JavaThread* current) { // spin count-down variable "ctr", reducing it by 100, say. JavaThread* ox = static_cast<JavaThread*>(owner_raw()); - if (ox == NULL) { - ox = static_cast<JavaThread*>(try_set_owner_from(NULL, current)); - if (ox == NULL) { + if (ox == nullptr) { + ox = static_cast<JavaThread*>(try_set_owner_from(nullptr, current)); + if (ox == nullptr) { // The CAS succeeded -- this thread acquired ownership // Take care of some bookkeeping to exit spin state. if (_succ == current) { - _succ = NULL; + _succ = nullptr; } // Increase _SpinDuration : @@ -1963,7 +1963,7 @@ int ObjectMonitor::TrySpin(JavaThread* current) { } // Did lock ownership change hands ?
- if (ox != prv && prv != NULL) { + if (ox != prv && prv != nullptr) { goto Abort; } prv = ox; @@ -1975,7 +1975,7 @@ int ObjectMonitor::TrySpin(JavaThread* current) { if (NotRunnable(current, ox)) { goto Abort; } - if (_succ == NULL) { + if (_succ == nullptr) { _succ = current; } } @@ -1996,7 +1996,7 @@ int ObjectMonitor::TrySpin(JavaThread* current) { Abort: if (_succ == current) { - _succ = NULL; + _succ = nullptr; // Invariant: after setting succ=null a contending thread // must recheck-retry _owner before parking. This usually happens // in the normal usage of TrySpin(), but it's safest @@ -2041,7 +2041,7 @@ int ObjectMonitor::TrySpin(JavaThread* current) { int ObjectMonitor::NotRunnable(JavaThread* current, JavaThread* ox) { // Check ox->TypeTag == 2BAD. - if (ox == NULL) return 0; + if (ox == nullptr) return 0; // Avoid transitive spinning ... // Say T1 spins or blocks trying to acquire L. T1._Stalled is set to L. @@ -2068,15 +2068,15 @@ int ObjectMonitor::NotRunnable(JavaThread* current, JavaThread* ox) { // WaitSet management ... ObjectWaiter::ObjectWaiter(JavaThread* current) { - _next = NULL; - _prev = NULL; + _next = nullptr; + _prev = nullptr; _notified = 0; _notifier_tid = 0; TState = TS_RUN; _thread = current; _event = _thread->_ParkEvent; _active = false; - assert(_event != NULL, "invariant"); + assert(_event != nullptr, "invariant"); } void ObjectWaiter::wait_reenter_begin(ObjectMonitor * const mon) { @@ -2088,11 +2088,11 @@ void ObjectWaiter::wait_reenter_end(ObjectMonitor * const mon) { } inline void ObjectMonitor::AddWaiter(ObjectWaiter* node) { - assert(node != NULL, "should not add NULL node"); - assert(node->_prev == NULL, "node already in list"); - assert(node->_next == NULL, "node already in list"); + assert(node != nullptr, "should not add null node"); + assert(node->_prev == nullptr, "node already in list"); + assert(node->_next == nullptr, "node already in list"); // put node at end of queue (circular doubly linked list) - if (_WaitSet == NULL) { + if (_WaitSet == nullptr) { _WaitSet = node; node->_prev = node; node->_next = node; @@ -2117,16 +2117,16 @@ inline ObjectWaiter* ObjectMonitor::DequeueWaiter() { } inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) { - assert(node != NULL, "should not dequeue NULL node"); - assert(node->_prev != NULL, "node already removed from list"); - assert(node->_next != NULL, "node already removed from list"); + assert(node != nullptr, "should not dequeue nullptr node"); + assert(node->_prev != nullptr, "node already removed from list"); + assert(node->_next != nullptr, "node already removed from list"); // when the waiter has woken up because of interrupt, // timeout or other spurious wake-up, dequeue the // waiter from waiting list ObjectWaiter* next = node->_next; if (next == node) { assert(node->_prev == node, "invariant check"); - _WaitSet = NULL; + _WaitSet = nullptr; } else { ObjectWaiter* prev = node->_prev; assert(prev->_next == node, "invariant check"); @@ -2137,19 +2137,19 @@ inline void ObjectMonitor::DequeueSpecificWaiter(ObjectWaiter* node) { _WaitSet = next; } } - node->_next = NULL; - node->_prev = NULL; + node->_next = nullptr; + node->_prev = nullptr; } // ----------------------------------------------------------------------------- // PerfData support -PerfCounter * ObjectMonitor::_sync_ContendedLockAttempts = NULL; -PerfCounter * ObjectMonitor::_sync_FutileWakeups = NULL; -PerfCounter * ObjectMonitor::_sync_Parks = NULL; -PerfCounter * ObjectMonitor::_sync_Notifications = NULL; -PerfCounter * 
ObjectMonitor::_sync_Inflations = NULL; -PerfCounter * ObjectMonitor::_sync_Deflations = NULL; -PerfLongVariable * ObjectMonitor::_sync_MonExtant = NULL; +PerfCounter * ObjectMonitor::_sync_ContendedLockAttempts = nullptr; +PerfCounter * ObjectMonitor::_sync_FutileWakeups = nullptr; +PerfCounter * ObjectMonitor::_sync_Parks = nullptr; +PerfCounter * ObjectMonitor::_sync_Notifications = nullptr; +PerfCounter * ObjectMonitor::_sync_Inflations = nullptr; +PerfCounter * ObjectMonitor::_sync_Deflations = nullptr; +PerfLongVariable * ObjectMonitor::_sync_MonExtant = nullptr; // One-shot global initialization for the sync subsystem. // We could also defer initialization and initialize on-demand diff --git a/src/hotspot/share/runtime/objectMonitor.hpp b/src/hotspot/share/runtime/objectMonitor.hpp index 3a6106d09a1f4..285232f6c4dae 100644 --- a/src/hotspot/share/runtime/objectMonitor.hpp +++ b/src/hotspot/share/runtime/objectMonitor.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -184,12 +184,12 @@ class ObjectMonitor : public CHeapObj<mtObjectMonitor> { // allocated and if the PerfDataManager has not freed the PerfData // objects which can happen at normal VM shutdown. // - #define OM_PERFDATA_OP(f, op_str) \ - do { \ - if (ObjectMonitor::_sync_ ## f != NULL && \ - PerfDataManager::has_PerfData()) { \ - ObjectMonitor::_sync_ ## f->op_str; \ - } \ + #define OM_PERFDATA_OP(f, op_str) \ + do { \ + if (ObjectMonitor::_sync_ ## f != nullptr && \ + PerfDataManager::has_PerfData()) { \ + ObjectMonitor::_sync_ ## f->op_str; \ + } \ } while (0) static PerfCounter * _sync_ContendedLockAttempts; @@ -246,7 +246,7 @@ class ObjectMonitor : public CHeapObj<mtObjectMonitor> { // Returns true if this OM has an owner, false otherwise. bool has_owner() const; - void* owner() const; // Returns NULL if DEFLATER_MARKER is observed. + void* owner() const; // Returns null if DEFLATER_MARKER is observed. void* owner_raw() const; // Returns true if owner field == DEFLATER_MARKER and false otherwise. bool owner_is_DEFLATER_MARKER() const; diff --git a/src/hotspot/share/runtime/objectMonitor.inline.hpp b/src/hotspot/share/runtime/objectMonitor.inline.hpp index 1944141f065c6..800585a9fbbb5 100644 --- a/src/hotspot/share/runtime/objectMonitor.inline.hpp +++ b/src/hotspot/share/runtime/objectMonitor.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -58,13 +58,13 @@ inline int ObjectMonitor::waiters() const { inline bool ObjectMonitor::has_owner() const { void* owner = owner_raw(); - return owner != NULL && owner != DEFLATER_MARKER; + return owner != nullptr && owner != DEFLATER_MARKER; } -// Returns NULL if DEFLATER_MARKER is observed. +// Returns null if DEFLATER_MARKER is observed. inline void* ObjectMonitor::owner() const { void* owner = owner_raw(); - return owner != DEFLATER_MARKER ? owner : NULL; + return owner != DEFLATER_MARKER ?
owner : nullptr; } inline void* ObjectMonitor::owner_raw() const { @@ -73,7 +73,7 @@ inline void* ObjectMonitor::owner_raw() const { // Returns true if owner field == DEFLATER_MARKER and false otherwise. // This accessor is called when we really need to know if the owner -// field == DEFLATER_MARKER and any non-NULL value won't do the trick. +// field == DEFLATER_MARKER and any non-null value won't do the trick. inline bool ObjectMonitor::owner_is_DEFLATER_MARKER() const { return owner_raw() == DEFLATER_MARKER; } @@ -100,7 +100,7 @@ inline void ObjectMonitor::release_clear_owner(void* old_value) { assert(prev == old_value, "unexpected prev owner=" INTPTR_FORMAT ", expected=" INTPTR_FORMAT, p2i(prev), p2i(old_value)); #endif - Atomic::release_store(&_owner, (void*)NULL); + Atomic::release_store(&_owner, (void*)nullptr); log_trace(monitorinflation, owner)("release_clear_owner(): mid=" INTPTR_FORMAT ", old_value=" INTPTR_FORMAT, p2i(this), p2i(old_value)); diff --git a/src/hotspot/share/runtime/orderAccess.cpp b/src/hotspot/share/runtime/orderAccess.cpp index b0f0c8e4c03e5..ec686b530c84a 100644 --- a/src/hotspot/share/runtime/orderAccess.cpp +++ b/src/hotspot/share/runtime/orderAccess.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,7 +36,7 @@ void OrderAccess::StubRoutines_fence() { // nothing in that case but assert if no fence code exists after threads have been created void (*func)() = CAST_TO_FN_PTR(void (*)(), StubRoutines::fence_entry()); - if (func != NULL) { + if (func != nullptr) { (*func)(); return; } diff --git a/src/hotspot/share/runtime/os.cpp b/src/hotspot/share/runtime/os.cpp index 8968adeab9ae1..b98d673182b5e 100644 --- a/src/hotspot/share/runtime/os.cpp +++ b/src/hotspot/share/runtime/os.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -80,8 +80,8 @@ # include # include -OSThread* os::_starting_thread = NULL; -address os::_polling_page = NULL; +OSThread* os::_starting_thread = nullptr; +address os::_polling_page = nullptr; volatile unsigned int os::_rand_seed = 1234567; int os::_processor_count = 0; int os::_initial_active_processor_count = 0; @@ -111,7 +111,7 @@ int os::snprintf_checked(char* buf, size_t len, const char* fmt, ...) { // Fill in buffer with current local time as an ISO-8601 string. // E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz. -// Returns buffer, or NULL if it failed. +// Returns buffer, or null if it failed. char* os::iso8601_time(char* buffer, size_t buffer_length, bool utc) { const jlong now = javaTimeMillis(); return os::iso8601_time(now, buffer, buffer_length, utc); @@ -119,7 +119,7 @@ char* os::iso8601_time(char* buffer, size_t buffer_length, bool utc) { // Fill in buffer with an ISO-8601 string corresponding to the given javaTimeMillis value // E.g., yyyy-mm-ddThh:mm:ss-zzzz. -// Returns buffer, or NULL if it failed. +// Returns buffer, or null if it failed. // This would mostly be a call to // strftime(...., "%Y-%m-%d" "T" "%H:%M:%S" "%z", ....) 
// except that on Windows the %z behaves badly, so we do it ourselves. @@ -129,13 +129,13 @@ char* os::iso8601_time(jlong milliseconds_since_19700101, char* buffer, size_t b // Output will be of the form "YYYY-MM-DDThh:mm:ss.mmm+zzzz\0" // Sanity check the arguments - if (buffer == NULL) { - assert(false, "NULL buffer"); - return NULL; + if (buffer == nullptr) { + assert(false, "null buffer"); + return nullptr; } if (buffer_length < os::iso8601_timestamp_size) { assert(false, "buffer_length too small"); - return NULL; + return nullptr; } const int milliseconds_per_microsecond = 1000; const time_t seconds_since_19700101 = @@ -145,14 +145,14 @@ char* os::iso8601_time(jlong milliseconds_since_19700101, char* buffer, size_t b // Convert the time value to a tm and timezone variable struct tm time_struct; if (utc) { - if (gmtime_pd(&seconds_since_19700101, &time_struct) == NULL) { + if (gmtime_pd(&seconds_since_19700101, &time_struct) == nullptr) { assert(false, "Failed gmtime_pd"); - return NULL; + return nullptr; } } else { - if (localtime_pd(&seconds_since_19700101, &time_struct) == NULL) { + if (localtime_pd(&seconds_since_19700101, &time_struct) == nullptr) { assert(false, "Failed localtime_pd"); - return NULL; + return nullptr; } } @@ -218,7 +218,7 @@ char* os::iso8601_time(jlong milliseconds_since_19700101, char* buffer, size_t b zone_min); if (printed == 0) { assert(false, "Failed jio_printf"); - return NULL; + return nullptr; } return buffer; } @@ -293,7 +293,7 @@ static bool conc_path_file_and_check(char *buffer, char *printbuffer, size_t pri static void free_array_of_char_arrays(char** a, size_t n) { while (n > 0) { n--; - if (a[n] != NULL) { + if (a[n] != nullptr) { FREE_C_HEAP_ARRAY(char, a[n]); } } @@ -312,21 +312,21 @@ bool os::dll_locate_lib(char *buffer, size_t buflen, if (pnamelen == 0) { // If no path given, use current working directory. const char* p = get_current_directory(buffer, buflen); - if (p != NULL) { + if (p != nullptr) { const size_t plen = strlen(buffer); const char lastchar = buffer[plen - 1]; retval = conc_path_file_and_check(buffer, &buffer[plen], buflen - plen, "", lastchar, fullfname); } - } else if (strchr(pname, *os::path_separator()) != NULL) { + } else if (strchr(pname, *os::path_separator()) != nullptr) { // A list of paths. Search for the path that contains the library. size_t n; char** pelements = split_path(pname, &n, fullfnamelen); - if (pelements != NULL) { + if (pelements != nullptr) { for (size_t i = 0; i < n; i++) { char* path = pelements[i]; - // Really shouldn't be NULL, but check can't hurt. - size_t plen = (path == NULL) ? 0 : strlen(path); + // Really shouldn't be null, but check can't hurt. + size_t plen = (path == nullptr) ? 0 : strlen(path); if (plen == 0) { continue; // Skip the empty path values. 
} @@ -425,7 +425,7 @@ static void signal_thread_entry(JavaThread* thread, TRAPS) { // Dispatch the signal to java HandleMark hm(THREAD); Klass* klass = SystemDictionary::resolve_or_null(vmSymbols::jdk_internal_misc_Signal(), THREAD); - if (klass != NULL) { + if (klass != nullptr) { JavaValue result(T_VOID); JavaCallArguments args; args.push_int(sig); @@ -442,13 +442,13 @@ static void signal_thread_entry(JavaThread* thread, TRAPS) { // tty is initialized early so we don't expect it to be null, but // if it is we can't risk doing an initialization that might // trigger additional out-of-memory conditions - if (tty != NULL) { + if (tty != nullptr) { char klass_name[256]; char tmp_sig_name[16]; const char* sig_name = "UNKNOWN"; InstanceKlass::cast(PENDING_EXCEPTION->klass())-> name()->as_klass_external_name(klass_name, 256); - if (os::exception_name(sig, tmp_sig_name, 16) != NULL) + if (os::exception_name(sig, tmp_sig_name, 16) != nullptr) sig_name = tmp_sig_name; warning("Exception %s occurred dispatching signal %s to handler" "- the VM may need to be forcibly terminated", @@ -499,10 +499,10 @@ void os::terminate_signal_thread() { typedef jint (JNICALL *JNI_OnLoad_t)(JavaVM *, void *); extern struct JavaVM_ main_vm; -static void* _native_java_library = NULL; +static void* _native_java_library = nullptr; void* os::native_java_library() { - if (_native_java_library == NULL) { + if (_native_java_library == nullptr) { char buffer[JVM_MAXPATHLEN]; char ebuf[1024]; @@ -511,7 +511,7 @@ void* os::native_java_library() { "java")) { _native_java_library = dll_load(buffer, ebuf, sizeof(ebuf)); } - if (_native_java_library == NULL) { + if (_native_java_library == nullptr) { vm_exit_during_initialization("Unable to load native library", ebuf); } @@ -538,24 +538,24 @@ void* os::native_java_library() { */ void* os::find_agent_function(AgentLibrary *agent_lib, bool check_lib, const char *syms[], size_t syms_len) { - assert(agent_lib != NULL, "sanity check"); + assert(agent_lib != nullptr, "sanity check"); const char *lib_name; void *handle = agent_lib->os_lib(); - void *entryName = NULL; + void *entryName = nullptr; char *agent_function_name; size_t i; // If checking then use the agent name otherwise test is_static_lib() to // see how to process this lookup - lib_name = ((check_lib || agent_lib->is_static_lib()) ? agent_lib->name() : NULL); + lib_name = ((check_lib || agent_lib->is_static_lib()) ? agent_lib->name() : nullptr); for (i = 0; i < syms_len; i++) { agent_function_name = build_agent_function_name(syms[i], lib_name, agent_lib->is_absolute_path()); - if (agent_function_name == NULL) { + if (agent_function_name == nullptr) { break; } entryName = dll_lookup(handle, agent_function_name); FREE_C_HEAP_ARRAY(char, agent_function_name); - if (entryName != NULL) { + if (entryName != nullptr) { break; } } @@ -569,8 +569,8 @@ bool os::find_builtin_agent(AgentLibrary *agent_lib, const char *syms[], void *proc_handle; void *save_handle; - assert(agent_lib != NULL, "sanity check"); - if (agent_lib->name() == NULL) { + assert(agent_lib != nullptr, "sanity check"); + if (agent_lib->name() == nullptr) { return false; } proc_handle = get_default_process_handle(); @@ -579,7 +579,7 @@ bool os::find_builtin_agent(AgentLibrary *agent_lib, const char *syms[], // We want to look in this process' symbol table. 
agent_lib->set_os_lib(proc_handle); ret = find_agent_function(agent_lib, true, syms, syms_len); - if (ret != NULL) { + if (ret != nullptr) { // Found an entry point like Agent_OnLoad_lib_name so we have a static agent agent_lib->set_valid(); agent_lib->set_static_lib(true); @@ -594,14 +594,14 @@ bool os::find_builtin_agent(AgentLibrary *agent_lib, const char *syms[], char *os::strdup(const char *str, MEMFLAGS flags) { size_t size = strlen(str); char *dup_str = (char *)malloc(size + 1, flags); - if (dup_str == NULL) return NULL; + if (dup_str == nullptr) return nullptr; strcpy(dup_str, str); return dup_str; } char* os::strdup_check_oom(const char* str, MEMFLAGS flags) { char* p = os::strdup(str, flags); - if (p == NULL) { + if (p == nullptr) { vm_exit_out_of_memory(strlen(str) + 1, OOM_MALLOC_ERROR, "os::strdup_check_oom"); } return p; @@ -644,7 +644,7 @@ void* os::malloc(size_t size, MEMFLAGS flags) { void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) { // Special handling for NMT preinit phase before arguments are parsed - void* rc = NULL; + void* rc = nullptr; if (NMTPreInit::handle_malloc(&rc, size)) { // No need to fill with 0 because DumpSharedSpaces doesn't use these // early allocations. @@ -654,25 +654,25 @@ void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) { DEBUG_ONLY(check_crash_protection()); // On malloc(0), implementations of malloc(3) have the choice to return either - // NULL or a unique non-NULL pointer. To unify libc behavior across our platforms + // null or a unique non-null pointer. To unify libc behavior across our platforms // we chose the latter. size = MAX2((size_t)1, size); // For the test flag -XX:MallocMaxTestWords if (has_reached_max_malloc_test_peak(size)) { - return NULL; + return nullptr; } const size_t outer_size = size + MemTracker::overhead_per_malloc(); // Check for overflow. if (outer_size < size) { - return NULL; + return nullptr; } ALLOW_C_FUNCTION(::malloc, void* const outer_ptr = ::malloc(outer_size);) - if (outer_ptr == NULL) { - return NULL; + if (outer_ptr == nullptr) { + return nullptr; } void* const inner_ptr = MemTracker::record_malloc((address)outer_ptr, size, memflags, stack); @@ -694,25 +694,25 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS flags) { void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCallStack& stack) { // Special handling for NMT preinit phase before arguments are parsed - void* rc = NULL; + void* rc = nullptr; if (NMTPreInit::handle_realloc(&rc, memblock, size)) { return rc; } - if (memblock == NULL) { + if (memblock == nullptr) { return os::malloc(size, memflags, stack); } DEBUG_ONLY(check_crash_protection()); // On realloc(p, 0), implementers of realloc(3) have the choice to return either - // NULL or a unique non-NULL pointer. To unify libc behavior across our platforms + // null or a unique non-null pointer. To unify libc behavior across our platforms // we chose the latter. size = MAX2((size_t)1, size); // For the test flag -XX:MallocMaxTestWords if (has_reached_max_malloc_test_peak(size)) { - return NULL; + return nullptr; } if (MemTracker::enabled()) { @@ -722,7 +722,7 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCa // Handle size overflow. 
if (new_outer_size < size) { - return NULL; + return nullptr; } // Perform integrity checks on and mark the old block as dead *before* calling the real realloc(3) since it @@ -735,7 +735,7 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCa // the real realloc ALLOW_C_FUNCTION(::realloc, void* const new_outer_ptr = ::realloc(header, new_outer_size);) - if (new_outer_ptr == NULL) { + if (new_outer_ptr == nullptr) { // realloc(3) failed and the block still exists. // We have however marked it as dead, revert this change. header->revive(); @@ -762,8 +762,8 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCa // NMT disabled. ALLOW_C_FUNCTION(::realloc, rc = ::realloc(memblock, size);) - if (rc == NULL) { - return NULL; + if (rc == nullptr) { + return nullptr; } } @@ -780,7 +780,7 @@ void os::free(void *memblock) { return; } - if (memblock == NULL) { + if (memblock == nullptr) { return; } @@ -860,7 +860,7 @@ void os::start_thread(Thread* thread) { } void os::abort(bool dump_core) { - abort(dump_core && CreateCoredumpOnCrash, NULL, NULL); + abort(dump_core && CreateCoredumpOnCrash, nullptr, nullptr); } //--------------------------------------------------------------------------- @@ -876,7 +876,7 @@ bool os::print_function_and_library_name(outputStream* st, // (used during error handling; its a coin toss, really, if on-stack allocation // is worse than (raw) C-heap allocation in that case). char* p = buf; - if (p == NULL) { + if (p == nullptr) { p = (char*)::alloca(O_BUFLEN); buflen = O_BUFLEN; } @@ -903,7 +903,7 @@ bool os::print_function_and_library_name(outputStream* st, // Print function name, optionally demangled if (demangle && strip_arguments) { char* args_start = strchr(p, '('); - if (args_start != NULL) { + if (args_start != nullptr) { *args_start = '\0'; } } @@ -924,7 +924,7 @@ bool os::print_function_and_library_name(outputStream* st, // Cut path parts if (shorten_paths) { char* p2 = strrchr(p, os::file_separator()[0]); - if (p2 != NULL) { + if (p2 != nullptr) { p = p2 + 1; } } @@ -987,7 +987,7 @@ void os::print_dhm(outputStream* st, const char* startStr, long sec) { long days = sec/86400; long hours = (sec/3600) - (days * 24); long minutes = (sec/60) - (days * 1440) - (hours * 60); - if (startStr == NULL) startStr = ""; + if (startStr == nullptr) startStr = ""; st->print_cr("%s %ld days %ld:%02ld hours", startStr, days, hours, minutes); } @@ -1005,9 +1005,9 @@ void os::print_environment_variables(outputStream* st, const char** env_list) { if (env_list) { st->print_cr("Environment Variables:"); - for (int i = 0; env_list[i] != NULL; i++) { + for (int i = 0; env_list[i] != nullptr; i++) { char *envvar = ::getenv(env_list[i]); - if (envvar != NULL) { + if (envvar != nullptr) { st->print("%s", env_list[i]); st->print("="); st->print("%s", envvar); @@ -1070,12 +1070,12 @@ void os::print_date_and_time(outputStream *st, char* buf, size_t buflen) { char* timestring = ctime(&tloc); // ctime adds newline. // edit out the newline char* nl = strchr(timestring, '\n'); - if (nl != NULL) { + if (nl != nullptr) { *nl = '\0'; } struct tm tz; - if (localtime_pd(&tloc, &tz) != NULL) { + if (localtime_pd(&tloc, &tz) != nullptr) { wchar_t w_buf[80]; size_t n = ::wcsftime(w_buf, 80, L"%Z", &tz); if (n > 0) { @@ -1106,7 +1106,7 @@ void os::print_date_and_time(outputStream *st, char* buf, size_t buflen) { // Check if pointer can be read from (4-byte read access). -// Helps to prove validity of a not-NULL pointer. 
+// Helps to prove validity of a non-null pointer. // Returns true in very early stages of VM life when stub is not yet generated. bool os::is_readable_pointer(const void* p) { int* const aligned = (int*) align_down((intptr_t)p, 4); @@ -1130,15 +1130,15 @@ bool os::is_readable_range(const void* from, const void* to) { // The verbose parameter is only set by the debug code in one case void os::print_location(outputStream* st, intptr_t x, bool verbose) { address addr = (address)x; - // Handle NULL first, so later checks don't need to protect against it. - if (addr == NULL) { - st->print_cr("0x0 is NULL"); + // Handle null first, so later checks don't need to protect against it. + if (addr == nullptr) { + st->print_cr("0x0 is nullptr"); return; } // Check if addr points into a code blob. CodeBlob* b = CodeCache::find_blob(addr); - if (b != NULL) { + if (b != nullptr) { b->dump_for_addr(addr, st, verbose); return; } @@ -1330,7 +1330,7 @@ FILE* os::fopen(const char* path, const char* mode) { #if !(defined LINUX || defined BSD || defined _WINDOWS) // assume fcntl FD_CLOEXEC support as a backup solution when 'e' or 'N' // is not supported as mode in fopen - if (file != NULL) { + if (file != nullptr) { int fd = fileno(file); if (fd != -1) { int fd_flags = fcntl(fd, F_GETFD); @@ -1352,7 +1352,7 @@ bool os::set_boot_path(char fileSep, char pathSep) { // modular image if "modules" jimage exists char* jimage = format_boot_path("%/lib/" MODULES_IMAGE_NAME, home, home_len, fileSep, pathSep); - if (jimage == NULL) return false; + if (jimage == nullptr) return false; bool has_jimage = (os::stat(jimage, &st) == 0); if (has_jimage) { Arguments::set_boot_class_path(jimage, true); @@ -1363,7 +1363,7 @@ bool os::set_boot_path(char fileSep, char pathSep) { // check if developer build with exploded modules char* base_classes = format_boot_path("%/modules/" JAVA_BASE_NAME, home, home_len, fileSep, pathSep); - if (base_classes == NULL) return false; + if (base_classes == nullptr) return false; if (os::stat(base_classes, &st) == 0) { Arguments::set_boot_class_path(base_classes, false); FREE_C_HEAP_ARRAY(char, base_classes); @@ -1376,7 +1376,7 @@ bool os::set_boot_path(char fileSep, char pathSep) { bool os::file_exists(const char* filename) { struct stat statbuf; - if (filename == NULL || strlen(filename) == 0) { + if (filename == nullptr || strlen(filename) == 0) { return false; } return os::stat(filename, &statbuf) == 0; @@ -1397,8 +1397,8 @@ bool os::file_exists(const char* filename) { // c> free up the data. 
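
The split_path() contract above (check the returned count, tolerate empty elements, free everything afterwards) is easy to violate, so here is a caller-side sketch; the path string and the reserved file-name length are invented for illustration:

    size_t n = 0;
    // The third argument reserves headroom in each element for a file name that
    // the caller appends later; split_path() returns nullptr for an empty path
    // or a zero reserve length.
    char** elems = os::split_path("/opt/a:/opt/b", &n, strlen("libfoo.so"));
    if (elems != nullptr) {
      for (size_t i = 0; i < n; i++) {
        if (elems[i] == nullptr || strlen(elems[i]) == 0) continue; // skip empties
        // ... use elems[i] ...
      }
      for (size_t i = 0; i < n; i++) {      // free each element,
        if (elems[i] != nullptr) {
          FREE_C_HEAP_ARRAY(char, elems[i]);
        }
      }
      FREE_C_HEAP_ARRAY(char*, elems);      // then the array itself
    }
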
char** os::split_path(const char* path, size_t* elements, size_t file_name_length) { *elements = (size_t)0; - if (path == NULL || strlen(path) == 0 || file_name_length == (size_t)NULL) { - return NULL; + if (path == nullptr || strlen(path) == 0 || file_name_length == (size_t)nullptr) { + return nullptr; } const char psepchar = *os::path_separator(); char* inpath = NEW_C_HEAP_ARRAY(char, strlen(path) + 1, mtInternal); @@ -1406,7 +1406,7 @@ char** os::split_path(const char* path, size_t* elements, size_t file_name_lengt size_t count = 1; char* p = strchr(inpath, psepchar); // Get a count of elements to allocate memory - while (p != NULL) { + while (p != nullptr) { count++; p++; p = strchr(p, psepchar); @@ -1499,7 +1499,7 @@ void os::pause() { #if defined(_WINDOWS) Sleep(100); #else - (void)::poll(NULL, 0, 100); + (void)::poll(nullptr, 0, 100); #endif } } else { @@ -1737,7 +1737,7 @@ bool os::create_stack_guard_pages(char* addr, size_t bytes) { char* os::reserve_memory(size_t bytes, bool executable, MEMFLAGS flags) { char* result = pd_reserve_memory(bytes, executable); - if (result != NULL) { + if (result != nullptr) { MemTracker::record_virtual_memory_reserve(result, bytes, CALLER_PC, flags); } return result; @@ -1745,7 +1745,7 @@ char* os::reserve_memory(size_t bytes, bool executable, MEMFLAGS flags) { char* os::attempt_reserve_memory_at(char* addr, size_t bytes, bool executable) { char* result = pd_attempt_reserve_memory_at(addr, bytes, executable); - if (result != NULL) { + if (result != nullptr) { MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC); } else { log_debug(os)("Attempt to reserve memory at " INTPTR_FORMAT " for " @@ -1862,9 +1862,9 @@ void os::pretouch_memory(void* start, void* end, size_t page_size) { char* os::map_memory_to_file(size_t bytes, int file_desc) { // Could have called pd_reserve_memory() followed by replace_existing_mapping_with_file_mapping(), // but AIX may use SHM in which case its more trouble to detach the segment and remap memory to the file. - // On all current implementations NULL is interpreted as any available address. - char* result = os::map_memory_to_file(NULL /* addr */, bytes, file_desc); - if (result != NULL) { + // On all current implementations null is interpreted as any available address. 
+ char* result = os::map_memory_to_file(nullptr /* addr */, bytes, file_desc); + if (result != nullptr) { MemTracker::record_virtual_memory_reserve_and_commit(result, bytes, CALLER_PC); } return result; @@ -1872,7 +1872,7 @@ char* os::map_memory_to_file(size_t bytes, int file_desc) { char* os::attempt_map_memory_to_file_at(char* addr, size_t bytes, int file_desc) { char* result = pd_attempt_map_memory_to_file_at(addr, bytes, file_desc); - if (result != NULL) { + if (result != nullptr) { MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC); } return result; @@ -1882,7 +1882,7 @@ char* os::map_memory(int fd, const char* file_name, size_t file_offset, char *addr, size_t bytes, bool read_only, bool allow_exec, MEMFLAGS flags) { char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec); - if (result != NULL) { + if (result != nullptr) { MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC, flags); } return result; @@ -1923,7 +1923,7 @@ char* os::reserve_memory_special(size_t size, size_t alignment, size_t page_size assert(is_aligned(addr, alignment), "Unaligned request address"); char* result = pd_reserve_memory_special(size, alignment, page_size, addr, executable); - if (result != NULL) { + if (result != nullptr) { // The memory is committed MemTracker::record_virtual_memory_reserve_and_commit((address)result, size, CALLER_PC); } diff --git a/src/hotspot/share/runtime/os.hpp b/src/hotspot/share/runtime/os.hpp index 1780ebc4916bd..e8927ac5b87d8 100644 --- a/src/hotspot/share/runtime/os.hpp +++ b/src/hotspot/share/runtime/os.hpp @@ -290,13 +290,13 @@ class os: AllStatic { // Fill in buffer with an ISO-8601 string corresponding to the given javaTimeMillis value // E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz. - // Returns buffer, or NULL if it failed. + // Returns buffer, or null if it failed. static char* iso8601_time(jlong milliseconds_since_19700101, char* buffer, size_t buffer_length, bool utc = false); // Fill in buffer with current local time as an ISO-8601 string. // E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz. - // Returns buffer, or NULL if it failed. + // Returns buffer, or null if it failed. static char* iso8601_time(char* buffer, size_t buffer_length, bool utc = false); // Interface for detecting multiprocessor system @@ -672,7 +672,7 @@ class os: AllStatic { // dladdr() for all platforms. Name of the nearest function is copied // to buf. Distance from its base address is optionally returned as offset. // If function name is not found, buf[0] is set to '\0' and offset is - // set to -1 (if offset is non-NULL). + // set to -1 (if offset is non-null). static bool dll_address_to_function_name(address addr, char* buf, int buflen, int* offset, bool demangle = true); @@ -680,7 +680,7 @@ class os: AllStatic { // Locate DLL/DSO. On success, full path of the library is copied to // buf, and offset is optionally set to be the distance between addr // and the library's base address. On failure, buf[0] is set to '\0' - // and offset is set to -1 (if offset is non-NULL). + // and offset is set to -1 (if offset is non-null). static bool dll_address_to_library_name(address addr, char* buf, int buflen, int* offset); @@ -697,7 +697,7 @@ class os: AllStatic { // "
<function name> in <library name>+<offset>" static bool print_function_and_library_name(outputStream* st, address addr, - char* buf = NULL, int buflen = 0, + char* buf = nullptr, int buflen = 0, bool shorten_paths = true, bool demangle = true, bool strip_arguments = false); @@ -711,7 +711,7 @@ class os: AllStatic { // Loads .dll/.so and // in case of error it checks if .dll/.so was built for the // same architecture as HotSpot is running on - // in case of an error NULL is returned and an error message is stored in ebuf + // in case of an error null is returned and an error message is stored in ebuf static void* dll_load(const char *name, char *ebuf, int ebuflen); // lookup symbol in a shared library @@ -816,7 +816,7 @@ class os: AllStatic { static bool is_first_C_frame(frame *fr); static frame get_sender_for_C_frame(frame *fr); - // return current frame. pc() and sp() are set to NULL on failure. + // return current frame. pc() and sp() are set to null on failure. static frame current_frame(); static void print_hex_dump(outputStream* st, address start, address end, int unitsize, @@ -826,7 +826,7 @@ class os: AllStatic { } // returns a string to describe the exception/signal; - // returns NULL if exception_code is not an OS exception/signal. + // returns null if exception_code is not an OS exception/signal. static const char* exception_name(int exception_code, char* buf, size_t buflen); // Returns the signal number (e.g. 11) for a given signal name (SIGSEGV). @@ -868,10 +868,10 @@ class os: AllStatic { static void* realloc (void *memblock, size_t size, MEMFLAGS flag, const NativeCallStack& stack); static void* realloc (void *memblock, size_t size, MEMFLAGS flag); - // handles NULL pointers + // handles null pointers static void free (void *memblock); static char* strdup(const char *, MEMFLAGS flags = mtInternal); // Like strdup - // Like strdup, but exit VM when strdup() returns NULL + // Like strdup, but exit VM when strdup() returns null static char* strdup_check_oom(const char*, MEMFLAGS flags = mtInternal); // SocketInterface (ex HPI SocketInterface ) diff --git a/src/hotspot/share/runtime/os.inline.hpp b/src/hotspot/share/runtime/os.inline.hpp index 3c6a4ffbaf4e4..6c7569643c29e 100644 --- a/src/hotspot/share/runtime/os.inline.hpp +++ b/src/hotspot/share/runtime/os.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -55,7 +55,7 @@ inline bool os::register_code_area(char *low, char *high) { #ifndef HAVE_FUNCTION_DESCRIPTORS inline void* os::resolve_function_descriptor(void* p) { - return NULL; + return nullptr; } #endif diff --git a/src/hotspot/share/runtime/park.cpp b/src/hotspot/share/runtime/park.cpp index e09affd4cbe9d..03560974d0598 100644 --- a/src/hotspot/share/runtime/park.cpp +++ b/src/hotspot/share/runtime/park.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -51,7 +51,7 @@ // immediately.
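
The park.cpp hunks that follow touch ParkEvent's global free list: immortal events linked through FreeNext and handed out under a spin lock. A detached sketch of the same shape, using simplified stand-in types rather than the HotSpot declarations:

    struct Event {
      Event* FreeNext;        // intrusive free-list link
      void*  AssociatedWith;  // owning thread while allocated, nullptr when free
    };

    static Event* volatile free_list = nullptr;  // LIFO head; HotSpot brackets
                                                 // pushes and pops with
                                                 // Thread::SpinAcquire/SpinRelease
                                                 // on ListLock

    // Pop one event, or nullptr when empty (the caller then materializes a new
    // one -- events are never freed, so a recycled event is always valid).
    Event* pop_free_event() {
      Event* ev = free_list;
      if (ev != nullptr) {
        free_list = ev->FreeNext;
        ev->FreeNext = nullptr;
      }
      return ev;
    }
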
volatile int ParkEvent::ListLock = 0 ; -ParkEvent * volatile ParkEvent::FreeList = NULL ; +ParkEvent * volatile ParkEvent::FreeList = nullptr ; ParkEvent * ParkEvent::Allocate (Thread * t) { ParkEvent * ev ; @@ -64,14 +64,14 @@ ParkEvent * ParkEvent::Allocate (Thread * t) { Thread::SpinAcquire(&ListLock, "ParkEventFreeListAllocate"); { ev = FreeList; - if (ev != NULL) { + if (ev != nullptr) { FreeList = ev->FreeNext; } } Thread::SpinRelease(&ListLock); - if (ev != NULL) { - guarantee (ev->AssociatedWith == NULL, "invariant") ; + if (ev != nullptr) { + guarantee (ev->AssociatedWith == nullptr, "invariant") ; } else { // Do this the hard way -- materialize a new ParkEvent. ev = new ParkEvent () ; @@ -79,14 +79,14 @@ ParkEvent * ParkEvent::Allocate (Thread * t) { } ev->reset() ; // courtesy to caller ev->AssociatedWith = t ; // Associate ev with t - ev->FreeNext = NULL ; + ev->FreeNext = nullptr ; return ev ; } void ParkEvent::Release (ParkEvent * ev) { - if (ev == NULL) return ; - guarantee (ev->FreeNext == NULL , "invariant") ; - ev->AssociatedWith = NULL ; + if (ev == nullptr) return ; + guarantee (ev->FreeNext == nullptr , "invariant") ; + ev->AssociatedWith = nullptr ; // Note that if we didn't have the TSM/immortal constraint, then // when reattaching we could trim the list. Thread::SpinAcquire(&ListLock, "ParkEventFreeListRelease"); diff --git a/src/hotspot/share/runtime/park.hpp b/src/hotspot/share/runtime/park.hpp index 836f3c21027bd..1af24f863972f 100644 --- a/src/hotspot/share/runtime/park.hpp +++ b/src/hotspot/share/runtime/park.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -137,9 +137,9 @@ class ParkEvent : public PlatformEvent { ~ParkEvent() { guarantee (0, "invariant") ; } ParkEvent() : PlatformEvent() { - AssociatedWith = NULL ; - FreeNext = NULL ; - ListNext = NULL ; + AssociatedWith = nullptr ; + FreeNext = nullptr ; + ListNext = nullptr ; TState = 0 ; Notified = 0 ; } diff --git a/src/hotspot/share/runtime/perfData.cpp b/src/hotspot/share/runtime/perfData.cpp index db6a418899617..0a67946b9206b 100644 --- a/src/hotspot/share/runtime/perfData.cpp +++ b/src/hotspot/share/runtime/perfData.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -37,9 +37,9 @@ #include "utilities/exceptions.hpp" #include "utilities/globalDefinitions.hpp" -PerfDataList* PerfDataManager::_all = NULL; -PerfDataList* PerfDataManager::_sampled = NULL; -PerfDataList* PerfDataManager::_constants = NULL; +PerfDataList* PerfDataManager::_all = nullptr; +PerfDataList* PerfDataManager::_sampled = nullptr; +PerfDataList* PerfDataManager::_constants = nullptr; volatile bool PerfDataManager::_has_PerfData = 0; /* @@ -80,7 +80,7 @@ const char* PerfDataManager::_name_spaces[] = { }; PerfData::PerfData(CounterNS ns, const char* name, Units u, Variability v) - : _name(NULL), _v(v), _u(u), _on_c_heap(false), _valuep(NULL) { + : _name(nullptr), _v(v), _u(u), _on_c_heap(false), _valuep(nullptr) { const char* prefix = PerfDataManager::ns_to_string(ns); @@ -136,7 +136,7 @@ void PerfData::create_entry(BasicType dtype, size_t dsize, size_t vlen) { size = ((size + align) & ~align); char* psmp = PerfMemory::alloc(size); - if (psmp == NULL) { + if (psmp == nullptr) { // out of PerfMemory memory resources. allocate on the C heap // to avoid vm termination. psmp = NEW_C_HEAP_ARRAY(char, size, mtInternal); @@ -194,7 +194,7 @@ PerfLong::PerfLong(CounterNS ns, const char* namep, Units u, Variability v) PerfLongVariant::PerfLongVariant(CounterNS ns, const char* namep, Units u, Variability v, jlong* sampled) : PerfLong(ns, namep, u, v), - _sampled(sampled), _sample_helper(NULL) { + _sampled(sampled), _sample_helper(nullptr) { sample(); } @@ -202,13 +202,13 @@ PerfLongVariant::PerfLongVariant(CounterNS ns, const char* namep, Units u, PerfLongVariant::PerfLongVariant(CounterNS ns, const char* namep, Units u, Variability v, PerfLongSampleHelper* helper) : PerfLong(ns, namep, u, v), - _sampled(NULL), _sample_helper(helper) { + _sampled(nullptr), _sample_helper(helper) { sample(); } void PerfLongVariant::sample() { - if (_sample_helper != NULL) { + if (_sample_helper != nullptr) { *(jlong*)_valuep = _sample_helper->take_sample(); } } @@ -223,8 +223,8 @@ PerfByteArray::PerfByteArray(CounterNS ns, const char* namep, Units u, void PerfString::set_string(const char* s2) { // copy n bytes of the string, assuring the null string is - // copied if s2 == NULL. - strncpy((char *)_valuep, s2 == NULL ? "" : s2, _length); + // copied if s2 == nullptr. + strncpy((char *)_valuep, s2 == nullptr ? "" : s2, _length); // assure the string is null terminated when strlen(s2) >= _length ((char*)_valuep)[_length-1] = '\0'; @@ -233,13 +233,13 @@ void PerfString::set_string(const char* s2) { PerfStringConstant::PerfStringConstant(CounterNS ns, const char* namep, const char* initial_value) : PerfString(ns, namep, V_Constant, - initial_value == NULL ? 1 : + initial_value == nullptr ? 
1 : MIN2((jint)(strlen((char*)initial_value)+1), (jint)(PerfMaxStringConstLength+1)), initial_value) { if (PrintMiscellaneous && Verbose) { - if (is_valid() && initial_value != NULL && + if (is_valid() && initial_value != nullptr && ((jint)strlen(initial_value) > (jint)PerfMaxStringConstLength)) { warning("Truncating PerfStringConstant: name = %s," @@ -255,7 +255,7 @@ PerfStringConstant::PerfStringConstant(CounterNS ns, const char* namep, void PerfDataManager::destroy() { - if (_all == NULL) + if (_all == nullptr) // destroy already called, or initialization never happened return; @@ -270,8 +270,8 @@ void PerfDataManager::destroy() { os::naked_short_sleep(1); // 1ms sleep to let other thread(s) run log_debug(perf, datacreation)("Total = %d, Sampled = %d, Constants = %d", - _all->length(), _sampled == NULL ? 0 : _sampled->length(), - _constants == NULL ? 0 : _constants->length()); + _all->length(), _sampled == nullptr ? 0 : _sampled->length(), + _constants == nullptr ? 0 : _constants->length()); for (int index = 0; index < _all->length(); index++) { PerfData* p = _all->at(index); @@ -282,9 +282,9 @@ void PerfDataManager::destroy() { delete(_sampled); delete(_constants); - _all = NULL; - _sampled = NULL; - _constants = NULL; + _all = nullptr; + _sampled = nullptr; + _constants = nullptr; } void PerfDataManager::add_item(PerfData* p, bool sampled) { @@ -292,7 +292,7 @@ void PerfDataManager::add_item(PerfData* p, bool sampled) { MutexLocker ml(PerfDataManager_lock); // Default sizes determined using -Xlog:perf+datacreation=debug - if (_all == NULL) { + if (_all == nullptr) { _all = new PerfDataList(191); _has_PerfData = true; } @@ -303,7 +303,7 @@ void PerfDataManager::add_item(PerfData* p, bool sampled) { _all->append(p); if (p->variability() == PerfData::V_Constant) { - if (_constants == NULL) { + if (_constants == nullptr) { _constants = new PerfDataList(51); } _constants->append(p); @@ -311,7 +311,7 @@ void PerfDataManager::add_item(PerfData* p, bool sampled) { } if (sampled) { - if (_sampled == NULL) { + if (_sampled == nullptr) { _sampled = new PerfDataList(1); } _sampled->append(p); @@ -322,16 +322,16 @@ PerfDataList* PerfDataManager::sampled() { MutexLocker ml(PerfDataManager_lock); - if (_sampled == NULL) - return NULL; + if (_sampled == nullptr) + return nullptr; PerfDataList* clone = _sampled->clone(); return clone; } char* PerfDataManager::counter_name(const char* ns, const char* name) { - assert(ns != NULL, "ns string required"); - assert(name != NULL, "name string required"); + assert(ns != nullptr, "ns string required"); + assert(name != nullptr, "name string required"); size_t len = strlen(ns) + strlen(name) + 2; char* result = NEW_RESOURCE_ARRAY(char, len); @@ -394,7 +394,7 @@ PerfStringVariable* PerfDataManager::create_string_variable(CounterNS ns, const char* s, TRAPS) { - if (max_length == 0 && s != NULL) max_length = (int)strlen(s); + if (max_length == 0 && s != nullptr) max_length = (int)strlen(s); assert(max_length != 0, "PerfStringVariable with length 0"); @@ -436,7 +436,7 @@ PerfLongVariable* PerfDataManager::create_long_variable(CounterNS ns, TRAPS) { // Sampled counters not supported if UsePerfData is false - if (!UsePerfData) return NULL; + if (!UsePerfData) return nullptr; PerfLongVariable* p = new PerfLongVariable(ns, name, u, sh); @@ -476,7 +476,7 @@ PerfLongCounter* PerfDataManager::create_long_counter(CounterNS ns, TRAPS) { // Sampled counters not supported if UsePerfData is false - if (!UsePerfData) return NULL; + if (!UsePerfData) return nullptr; 
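
Each PerfDataManager::create_* factory above returns nullptr outright when UsePerfData is false, and a created object's PerfData::is_valid() (see the perfData.hpp hunk further down) reports whether backing memory was obtained, so call sites guard on both. A hypothetical creation site, with an invented counter name, a null sample helper, and exception handling elided:

    PerfLongCounter* events = PerfDataManager::create_long_counter(
        SUN_RT, "exampleEvents", PerfData::U_Events,
        (PerfLongSampleHelper*)nullptr, THREAD);
    if (events != nullptr && events->is_valid()) {
      events->inc();  // safe whether backed by PerfMemory or the C-heap fallback
    }
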
PerfLongCounter* p = new PerfLongCounter(ns, name, u, sh); @@ -511,7 +511,7 @@ PerfDataList::~PerfDataList() { bool PerfDataList::by_name(void* name, PerfData* pd) { - if (pd == NULL) + if (pd == nullptr) return false; return strcmp((const char*)name, pd->name()) == 0; @@ -524,14 +524,14 @@ PerfData* PerfDataList::find_by_name(const char* name) { if (i >= 0 && i <= _set->length()) return _set->at(i); else - return NULL; + return nullptr; } PerfDataList* PerfDataList::clone() { PerfDataList* copy = new PerfDataList(this); - assert(copy != NULL, "just checking"); + assert(copy != nullptr, "just checking"); return copy; } diff --git a/src/hotspot/share/runtime/perfData.hpp b/src/hotspot/share/runtime/perfData.hpp index 2cefc277e4a8c..e396dea15974a 100644 --- a/src/hotspot/share/runtime/perfData.hpp +++ b/src/hotspot/share/runtime/perfData.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -307,7 +307,7 @@ class PerfData : public CHeapObj<mtInternal> { // returns a boolean indicating the validity of this object. // the object is valid if and only if memory in PerfMemory // region was successfully allocated. - inline bool is_valid() { return _valuep != NULL; } + inline bool is_valid() { return _valuep != nullptr; } // returns a boolean indicating whether the underlying object // was allocated in the PerfMemory region or on the C heap. @@ -604,12 +604,12 @@ class PerfDataList : public CHeapObj<mtInternal> { ~PerfDataList(); // return the PerfData item indicated by name, - // or NULL if it doesn't exist. + // or null if it doesn't exist. PerfData* find_by_name(const char* name); // return true if a PerfData item with the name specified in the // argument exists, otherwise return false. - bool contains(const char* name) { return find_by_name(name) != NULL; } + bool contains(const char* name) { return find_by_name(name) != nullptr; } // return the number of PerfData items in this list inline int length(); diff --git a/src/hotspot/share/runtime/perfData.inline.hpp b/src/hotspot/share/runtime/perfData.inline.hpp index c802aa2f6d92b..5212691b92415 100644 --- a/src/hotspot/share/runtime/perfData.inline.hpp +++ b/src/hotspot/share/runtime/perfData.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -43,7 +43,7 @@ inline PerfData* PerfDataList::at(int index) { } inline bool PerfDataManager::exists(const char* name) { - if (_all != NULL) { + if (_all != nullptr) { return _all->contains(name); } else { return false; diff --git a/src/hotspot/share/runtime/perfMemory.cpp b/src/hotspot/share/runtime/perfMemory.cpp index 2906d33487494..9f6cee4782654 100644 --- a/src/hotspot/share/runtime/perfMemory.cpp +++ b/src/hotspot/share/runtime/perfMemory.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -47,12 +47,12 @@ const char PERFDATA_NAME[] = "hsperfdata"; static const size_t PERFDATA_FILENAME_LEN = sizeof(PERFDATA_NAME) + UINT_CHARS + 1; -char* PerfMemory::_start = NULL; -char* PerfMemory::_end = NULL; -char* PerfMemory::_top = NULL; +char* PerfMemory::_start = nullptr; +char* PerfMemory::_end = nullptr; +char* PerfMemory::_top = nullptr; size_t PerfMemory::_capacity = 0; int PerfMemory::_initialized = false; -PerfDataPrologue* PerfMemory::_prologue = NULL; +PerfDataPrologue* PerfMemory::_prologue = nullptr; bool PerfMemory::_destroyed = false; void perfMemory_init() { @@ -106,7 +106,7 @@ void PerfMemory::initialize() { // allocate PerfData memory region create_memory_region(capacity); - if (_start == NULL) { + if (_start == nullptr) { // the PerfMemory region could not be created as desired. Rather // than terminating the JVM, we revert to creating the instrumentation @@ -136,7 +136,7 @@ void PerfMemory::initialize() { _top = _start + sizeof(PerfDataPrologue); } - assert(_prologue != NULL, "prologue pointer must be initialized"); + assert(_prologue != nullptr, "prologue pointer must be initialized"); #ifdef VM_LITTLE_ENDIAN _prologue->magic = (jint)0xc0c0feca; @@ -163,7 +163,7 @@ void PerfMemory::destroy() { if (!is_usable()) return; - if (_start != NULL && _prologue->overflow != 0) { + if (_start != nullptr && _prologue->overflow != 0) { // This state indicates that the contiguous memory region exists and // that it wasn't large enough to hold all the counters. In this case, @@ -188,7 +188,7 @@ void PerfMemory::destroy() { } } - if (_start != NULL) { + if (_start != nullptr) { // this state indicates that the contiguous memory region was successfully // and that persistent resources may need to be cleaned up. This is @@ -206,7 +206,7 @@ void PerfMemory::destroy() { // char* PerfMemory::alloc(size_t size) { - if (!UsePerfData) return NULL; + if (!UsePerfData) return nullptr; MutexLocker ml(PerfDataMemAlloc_lock); @@ -217,7 +217,7 @@ char* PerfMemory::alloc(size_t size) { _prologue->overflow += (jint)size; - return NULL; + return nullptr; } char* result = _top; @@ -243,9 +243,9 @@ void PerfMemory::mark_updated() { // Returns the complete path including the file name of performance data file. // Caller is expected to release the allocated memory. char* PerfMemory::get_perfdata_file_path() { - char* dest_file = NULL; + char* dest_file = nullptr; - if (PerfDataSaveFile != NULL) { + if (PerfDataSaveFile != nullptr) { // dest_file_name stores the validated file name if file_name // contains %p which will be replaced by pid. dest_file = NEW_C_HEAP_ARRAY(char, JVM_MAXPATHLEN, mtInternal); diff --git a/src/hotspot/share/runtime/perfMemory.hpp b/src/hotspot/share/runtime/perfMemory.hpp index 0d81263051682..e6a914cc367c7 100644 --- a/src/hotspot/share/runtime/perfMemory.hpp +++ b/src/hotspot/share/runtime/perfMemory.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -137,7 +137,7 @@ class PerfMemory : AllStatic { static bool is_destroyed() { return _destroyed; } static bool is_usable() { return is_initialized() && !is_destroyed(); } static bool contains(char* addr) { - return ((_start != NULL) && (addr >= _start) && (addr < _end)); + return ((_start != nullptr) && (addr >= _start) && (addr < _end)); } static void mark_updated(); diff --git a/src/hotspot/share/runtime/reflection.cpp b/src/hotspot/share/runtime/reflection.cpp index bf840996b46c8..62bc2a5ebadb8 100644 --- a/src/hotspot/share/runtime/reflection.cpp +++ b/src/hotspot/share/runtime/reflection.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -54,14 +54,14 @@ #include "utilities/formatBuffer.hpp" static void trace_class_resolution(oop mirror) { - if (mirror == NULL || java_lang_Class::is_primitive(mirror)) { + if (mirror == nullptr || java_lang_Class::is_primitive(mirror)) { return; } Klass* to_class = java_lang_Class::as_Klass(mirror); ResourceMark rm; int line_number = -1; - const char * source_file = NULL; - Klass* caller = NULL; + const char * source_file = nullptr; + Klass* caller = nullptr; JavaThread* jthread = JavaThread::current(); if (jthread->has_last_Java_frame()) { vframeStream vfst(jthread); @@ -75,16 +75,16 @@ static void trace_class_resolution(oop mirror) { caller = vfst.method()->method_holder(); line_number = vfst.method()->line_number_from_bci(vfst.bci()); Symbol* s = vfst.method()->method_holder()->source_file_name(); - if (s != NULL) { + if (s != nullptr) { source_file = s->as_C_string(); } } } - if (caller != NULL) { + if (caller != nullptr) { const char * from = caller->external_name(); const char * to = to_class->external_name(); // print in a single call to reduce interleaving between threads - if (source_file != NULL) { + if (source_file != nullptr) { log_debug(class, resolve)("%s %s %s:%d (reflection)", from, to, source_file, line_number); } else { log_debug(class, resolve)("%s %s (reflection)", from, to); @@ -95,14 +95,14 @@ static void trace_class_resolution(oop mirror) { oop Reflection::box(jvalue* value, BasicType type, TRAPS) { if (type == T_VOID) { - return NULL; + return nullptr; } if (is_reference_type(type)) { // regular objects are not boxed return cast_to_oop(value->l); } oop result = java_lang_boxing_object::create(type, value, CHECK_NULL); - if (result == NULL) { + if (result == nullptr) { THROW_(vmSymbols::java_lang_IllegalArgumentException(), result); } return result; @@ -110,7 +110,7 @@ oop Reflection::box(jvalue* value, BasicType type, TRAPS) { BasicType Reflection::unbox_for_primitive(oop box, jvalue* value, TRAPS) { - if (box == NULL) { + if (box == nullptr) { THROW_(vmSymbols::java_lang_IllegalArgumentException(), T_ILLEGAL); } return java_lang_boxing_object::get_value(box, value); @@ -274,7 +274,7 @@ void Reflection::array_set(jvalue* value, arrayOop a, int index, BasicType value if (a->is_objArray()) { if (value_type == T_OBJECT) { oop obj = cast_to_oop(value->l); - if (obj != NULL) { + if (obj != nullptr) { Klass* element_klass = ObjArrayKlass::cast(a->klass())->element_klass(); if (!obj->is_a(element_klass)) { THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "array element type 
mismatch"); @@ -333,7 +333,7 @@ static Klass* basic_type_mirror_to_arrayklass(oop basic_type_mirror, TRAPS) { } arrayOop Reflection::reflect_new_array(oop element_mirror, jint length, TRAPS) { - if (element_mirror == NULL) { + if (element_mirror == nullptr) { THROW_0(vmSymbols::java_lang_NullPointerException()); } if (length < 0) { @@ -356,7 +356,7 @@ arrayOop Reflection::reflect_new_multi_array(oop element_mirror, typeArrayOop di assert(dim_array->is_typeArray(), "just checking"); assert(TypeArrayKlass::cast(dim_array->klass())->element_type() == T_INT, "just checking"); - if (element_mirror == NULL) { + if (element_mirror == nullptr) { THROW_0(vmSymbols::java_lang_NullPointerException()); } @@ -443,7 +443,7 @@ Reflection::VerifyClassAccessResults Reflection::verify_class_access( // Verify that current_class can access new_class. If the classloader_only // flag is set, we automatically allow any accesses in which current_class // doesn't have a classloader. - if ((current_class == NULL) || + if ((current_class == nullptr) || (current_class == new_class) || is_same_class_package(current_class, new_class)) { return ACCESS_OK; @@ -492,7 +492,7 @@ Reflection::VerifyClassAccessResults Reflection::verify_class_access( } PackageEntry* package_to = new_class->package(); - assert(package_to != NULL, "can not obtain new_class' package"); + assert(package_to != nullptr, "can not obtain new_class' package"); { MutexLocker m1(Module_lock); @@ -530,15 +530,15 @@ char* Reflection::verify_class_access_msg(const Klass* current_class, const InstanceKlass* new_class, const VerifyClassAccessResults result) { assert(result != ACCESS_OK, "must be failure result"); - char * msg = NULL; - if (result != OTHER_PROBLEM && new_class != NULL && current_class != NULL) { + char * msg = nullptr; + if (result != OTHER_PROBLEM && new_class != nullptr && current_class != nullptr) { // Find the module entry for current_class, the accessor ModuleEntry* module_from = current_class->module(); const char * module_from_name = module_from->is_named() ? module_from->name()->as_C_string() : UNNAMED_MODULE; const char * current_class_name = current_class->external_name(); // Find the module entry for new_class, the accessee - ModuleEntry* module_to = NULL; + ModuleEntry* module_to = nullptr; module_to = new_class->module(); const char * module_to_name = module_to->is_named() ? 
module_to->name()->as_C_string() : UNNAMED_MODULE; const char * new_class_name = new_class->external_name(); @@ -555,7 +555,7 @@ char* Reflection::verify_class_access_msg(const Klass* current_class, module_to_name, module_from_name, module_to_name); } else { oop jlm = module_to->module(); - assert(jlm != NULL, "Null jlm in module_to ModuleEntry"); + assert(jlm != nullptr, "Null jlm in module_to ModuleEntry"); intptr_t identity_hash = jlm->identity_hash(); size_t len = 160 + strlen(current_class_name) + 2*strlen(module_from_name) + strlen(new_class_name) + 2*sizeof(uintx); @@ -567,7 +567,7 @@ char* Reflection::verify_class_access_msg(const Klass* current_class, } } else if (result == TYPE_NOT_EXPORTED) { - assert(new_class->package() != NULL, + assert(new_class->package() != nullptr, "Unnamed packages are always exported"); const char * package_name = new_class->package()->name()->as_klass_external_name(); @@ -582,7 +582,7 @@ char* Reflection::verify_class_access_msg(const Klass* current_class, module_to_name, module_to_name, package_name, module_from_name); } else { oop jlm = module_from->module(); - assert(jlm != NULL, "Null jlm in module_from ModuleEntry"); + assert(jlm != nullptr, "Null jlm in module_from ModuleEntry"); intptr_t identity_hash = jlm->identity_hash(); size_t len = 170 + strlen(current_class_name) + strlen(new_class_name) + 2*strlen(module_to_name) + strlen(package_name) + 2*sizeof(uintx); @@ -618,7 +618,7 @@ bool Reflection::verify_member_access(const Klass* current_class, // class file parsing when we only care about the static type); in that case // callers should ensure that resolved_class == member_class. // - if ((current_class == NULL) || + if ((current_class == nullptr) || (current_class == member_class) || access.is_public()) { return true; @@ -748,7 +748,7 @@ static objArrayHandle get_parameter_types(const methodHandle& method, } if (!ss.at_return_type()) { mirrors->obj_at_put(index++, mirror); - } else if (return_type != NULL) { + } else if (return_type != nullptr) { // Collect return type as well assert(ss.at_return_type(), "return type should be present"); *return_type = mirror; @@ -781,9 +781,9 @@ oop Reflection::new_method(const methodHandle& method, bool for_constant_pool_ac Symbol* signature = method->signature(); int parameter_count = ArgumentCount(signature).size(); - oop return_type_oop = NULL; + oop return_type_oop = nullptr; objArrayHandle parameter_types = get_parameter_types(method, parameter_count, &return_type_oop, CHECK_NULL); - if (parameter_types.is_null() || return_type_oop == NULL) return NULL; + if (parameter_types.is_null() || return_type_oop == nullptr) return nullptr; Handle return_type(THREAD, return_type_oop); @@ -793,7 +793,7 @@ oop Reflection::new_method(const methodHandle& method, bool for_constant_pool_ac Symbol* method_name = method->name(); oop name_oop = StringTable::intern(method_name, CHECK_NULL); Handle name = Handle(THREAD, name_oop); - if (name == NULL) return NULL; + if (name == nullptr) return nullptr; const int modifiers = method->access_flags().as_int() & JVM_RECOGNIZED_METHOD_MODIFIERS; @@ -807,7 +807,7 @@ oop Reflection::new_method(const methodHandle& method, bool for_constant_pool_ac java_lang_reflect_Method::set_exception_types(mh(), exception_types()); java_lang_reflect_Method::set_modifiers(mh(), modifiers); java_lang_reflect_Method::set_override(mh(), false); - if (method->generic_signature() != NULL) { + if (method->generic_signature() != nullptr) { Symbol* gs = method->generic_signature(); Handle sig = 
java_lang_String::create_from_symbol(gs, CHECK_NULL); java_lang_reflect_Method::set_signature(mh(), sig()); @@ -830,8 +830,8 @@ oop Reflection::new_constructor(const methodHandle& method, TRAPS) { Symbol* signature = method->signature(); int parameter_count = ArgumentCount(signature).size(); - objArrayHandle parameter_types = get_parameter_types(method, parameter_count, NULL, CHECK_NULL); - if (parameter_types.is_null()) return NULL; + objArrayHandle parameter_types = get_parameter_types(method, parameter_count, nullptr, CHECK_NULL); + if (parameter_types.is_null()) return nullptr; objArrayHandle exception_types = get_exception_types(method, CHECK_NULL); assert(!exception_types.is_null(), "cannot return null"); @@ -846,7 +846,7 @@ oop Reflection::new_constructor(const methodHandle& method, TRAPS) { java_lang_reflect_Constructor::set_exception_types(ch(), exception_types()); java_lang_reflect_Constructor::set_modifiers(ch(), modifiers); java_lang_reflect_Constructor::set_override(ch(), false); - if (method->generic_signature() != NULL) { + if (method->generic_signature() != nullptr) { Symbol* gs = method->generic_signature(); Handle sig = java_lang_String::create_from_symbol(gs, CHECK_NULL); java_lang_reflect_Constructor::set_signature(ch(), sig()); @@ -893,11 +893,11 @@ oop Reflection::new_parameter(Handle method, int index, Symbol* sym, Handle rh = java_lang_reflect_Parameter::create(CHECK_NULL); - if(NULL != sym) { + if(nullptr != sym) { Handle name = java_lang_String::create_from_symbol(sym, CHECK_NULL); java_lang_reflect_Parameter::set_name(rh(), name()); } else { - java_lang_reflect_Parameter::set_name(rh(), NULL); + java_lang_reflect_Parameter::set_name(rh(), nullptr); } java_lang_reflect_Parameter::set_modifiers(rh(), flags); @@ -1094,7 +1094,7 @@ static oop invoke(InstanceKlass* klass, THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "argument type mismatch"); } } else { - if (arg != NULL) { + if (arg != nullptr) { Klass* k = java_lang_Class::as_Klass(type_mirror); if (!arg->is_a(k)) { THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), @@ -1153,7 +1153,7 @@ oop Reflection::invoke_method(oop method_mirror, Handle receiver, objArrayHandle InstanceKlass* klass = InstanceKlass::cast(java_lang_Class::as_Klass(mirror)); Method* m = klass->method_with_idnum(slot); - if (m == NULL) { + if (m == nullptr) { THROW_MSG_0(vmSymbols::java_lang_InternalError(), "invoke"); } methodHandle method(THREAD, m); @@ -1170,7 +1170,7 @@ oop Reflection::invoke_constructor(oop constructor_mirror, objArrayHandle args, InstanceKlass* klass = InstanceKlass::cast(java_lang_Class::as_Klass(mirror)); Method* m = klass->method_with_idnum(slot); - if (m == NULL) { + if (m == nullptr) { THROW_MSG_0(vmSymbols::java_lang_InternalError(), "invoke"); } methodHandle method(THREAD, m); diff --git a/src/hotspot/share/runtime/reflectionUtils.cpp b/src/hotspot/share/runtime/reflectionUtils.cpp index 0c15ebe6a870d..16b3ca7731582 100644 --- a/src/hotspot/share/runtime/reflectionUtils.cpp +++ b/src/hotspot/share/runtime/reflectionUtils.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -47,11 +47,11 @@ KlassStream::KlassStream(InstanceKlass* klass, bool local_only, bool KlassStream::eos() { if (index() >= 0) return false; if (_local_only) return true; - if (!_klass->is_interface() && _klass->super() != NULL) { + if (!_klass->is_interface() && _klass->super() != nullptr) { // go up superclass chain (not for interfaces) _klass = _klass->java_super(); // Next for method walks, walk default methods - } else if (_walk_defaults && (_defaults_checked == false) && (_base_klass->default_methods() != NULL)) { + } else if (_walk_defaults && (_defaults_checked == false) && (_base_klass->default_methods() != nullptr)) { _base_class_search_defaults = true; _klass = _base_klass; _defaults_checked = true; diff --git a/src/hotspot/share/runtime/registerMap.hpp b/src/hotspot/share/runtime/registerMap.hpp index c532a5062ec0f..5cf68a87b7120 100644 --- a/src/hotspot/share/runtime/registerMap.hpp +++ b/src/hotspot/share/runtime/registerMap.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -143,7 +143,7 @@ class RegisterMap : public StackObj { void set_walk_cont(bool value) { _walk_cont = value; } - bool in_cont() const { return _chunk() != NULL; } // Whether we are currently on the hstack; if true, frames are relativized + bool in_cont() const { return _chunk() != nullptr; } // Whether we are currently on the hstack; if true, frames are relativized oop cont() const; stackChunkHandle stack_chunk() const { return _chunk; } void set_stack_chunk(stackChunkOop chunk); diff --git a/src/hotspot/share/runtime/relocator.cpp b/src/hotspot/share/runtime/relocator.cpp index a8b239d6db2c8..f3f8de2bf3330 100644 --- a/src/hotspot/share/runtime/relocator.cpp +++ b/src/hotspot/share/runtime/relocator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -122,14 +122,14 @@ class ChangeSwitchPad : public ChangeItem { Relocator::Relocator(const methodHandle& m, RelocatorListener* listener) { set_method(m); set_code_length(method()->code_size()); - set_code_array(NULL); + set_code_array(nullptr); // Allocate code array and copy bytecodes if (!expand_code_array(0)) { // Should have at least MAX_METHOD_LENGTH available or the verifier // would have failed. ShouldNotReachHere(); } - set_compressed_line_number_table(NULL); + set_compressed_line_number_table(nullptr); set_compressed_line_number_table_size(0); _listener = listener; } @@ -173,7 +173,7 @@ methodHandle Relocator::insert_space_at(int bci, int size, u_char inst_buffer[], bool Relocator::handle_code_changes() { - assert(_changes != NULL, "changes vector must be initialized"); + assert(_changes != nullptr, "changes vector must be initialized"); while (!_changes->is_empty()) { // Inv: everything is aligned. 
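
Worth spelling out once for the whole patch: NULL in C++ is merely an integer-flavored macro (0 or 0L), so it can steer overload resolution away from pointer parameters, while nullptr has its own type, std::nullptr_t, which converts only to pointer types. A self-contained illustration (not HotSpot code):

    #include <cstddef>

    static int dispatch(int)   { return 1; }  // arithmetic overload
    static int dispatch(char*) { return 2; }  // pointer overload

    int demo() {
      // dispatch(NULL);        // resolves to dispatch(int) when NULL is 0, or is
      //                        // ambiguous when NULL is 0L; never dispatch(char*)
      return dispatch(nullptr); // always dispatch(char*): std::nullptr_t does not
                                // convert to int
    }
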
@@ -501,7 +501,7 @@ void Relocator::adjust_stack_map_table(int bci, int delta) { ClassLoaderData* loader_data = method()->method_holder()->class_loader_data(); Array<u1>* new_data = insert_hole_at(loader_data, frame_offset + 1, 2, data); - if (new_data == NULL) { + if (new_data == nullptr) { return; // out-of-memory? } // Deallocate old data @@ -517,7 +517,7 @@ void Relocator::adjust_stack_map_table(int bci, int delta) { same_frame_extended::create_at(frame_addr, new_offset_delta); } else { same_locals_1_stack_item_extended::create_at( - frame_addr, new_offset_delta, NULL); + frame_addr, new_offset_delta, nullptr); // the verification_info_type should already be at the right spot } } @@ -539,7 +539,7 @@ void Relocator::adjust_stack_map_table(int bci, int delta) { // Full frame has stack values too full_frame* ff = frame->as_full_frame(); - if (ff != NULL) { + if (ff != nullptr) { address eol = (address)types; number_of_types = ff->stack_slots(eol); types = ff->stack(eol); @@ -574,7 +574,7 @@ bool Relocator::expand_code_array(int delta) { if (!new_code_array) return false; // Expanding current array - if (code_array() != NULL) { + if (code_array() != nullptr) { memcpy(new_code_array, code_array(), code_length()); } else { // Initial copy. Copy directly from Method* diff --git a/src/hotspot/share/runtime/relocator.hpp b/src/hotspot/share/runtime/relocator.hpp index c0c42fdfda1fb..7318d43c0244b 100644 --- a/src/hotspot/share/runtime/relocator.hpp +++ b/src/hotspot/share/runtime/relocator.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -97,7 +97,7 @@ class Relocator : public ResourceObj { // get the address of <bci> in the code_array inline char* addr_at(int bci) const { return (char*) &code_array()[bci]; } - int instruction_length_at(int bci) { return Bytecodes::length_at(NULL, code_array() + bci); } + int instruction_length_at(int bci) { return Bytecodes::length_at(nullptr, code_array() + bci); } // Helper methods int align(int n) const { return (n+3) & ~3; } @@ -119,7 +119,7 @@ class Relocator : public ResourceObj { // Callback support RelocatorListener *_listener; void notify(int bci, int delta, int new_code_length) { - if (_listener != NULL) + if (_listener != nullptr) _listener->relocated(bci, delta, new_code_length); } }; diff --git a/src/hotspot/share/runtime/safepoint.cpp b/src/hotspot/share/runtime/safepoint.cpp index ee0891c423cc2..2ff593a01435c 100644 --- a/src/hotspot/share/runtime/safepoint.cpp +++ b/src/hotspot/share/runtime/safepoint.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -196,7 +196,7 @@ bool SafepointSynchronize::thread_not_running(ThreadSafepointState *cur_state) { static void assert_list_is_valid(const ThreadSafepointState* tss_head, int still_running) { int a = 0; const ThreadSafepointState *tmp_tss = tss_head; - while (tmp_tss != NULL) { + while (tmp_tss != nullptr) { ++a; assert(tmp_tss->is_running(), "Illegal initial state"); tmp_tss = tmp_tss->get_next(); @@ -228,11 +228,11 @@ int SafepointSynchronize::synchronize_threads(jlong safepoint_limit_time, int no // Iterate through all threads until it has been determined how to stop them all at a safepoint. int still_running = nof_threads; - ThreadSafepointState *tss_head = NULL; + ThreadSafepointState *tss_head = nullptr; ThreadSafepointState **p_prev = &tss_head; for (; JavaThread *cur = jtiwh.next(); ) { ThreadSafepointState *cur_tss = cur->safepoint_state(); - assert(cur_tss->get_next() == NULL, "Must be NULL"); + assert(cur_tss->get_next() == nullptr, "Must be nullptr"); if (thread_not_running(cur_tss)) { --still_running; } else { @@ -240,7 +240,7 @@ int SafepointSynchronize::synchronize_threads(jlong safepoint_limit_time, int no p_prev = cur_tss->next_ptr(); } } - *p_prev = NULL; + *p_prev = nullptr; DEBUG_ONLY(assert_list_is_valid(tss_head, still_running);) @@ -248,7 +248,7 @@ int SafepointSynchronize::synchronize_threads(jlong safepoint_limit_time, int no // If there is no thread still running, we are already done. if (still_running <= 0) { - assert(tss_head == NULL, "Must be empty"); + assert(tss_head == nullptr, "Must be empty"); return 1; } @@ -263,14 +263,14 @@ int SafepointSynchronize::synchronize_threads(jlong safepoint_limit_time, int no p_prev = &tss_head; ThreadSafepointState *cur_tss = tss_head; - while (cur_tss != NULL) { + while (cur_tss != nullptr) { assert(cur_tss->is_running(), "Illegal initial state"); if (thread_not_running(cur_tss)) { --still_running; - *p_prev = NULL; + *p_prev = nullptr; ThreadSafepointState *tmp = cur_tss; cur_tss = cur_tss->get_next(); - tmp->set_next(NULL); + tmp->set_next(nullptr); } else { *p_prev = cur_tss; p_prev = cur_tss->next_ptr(); @@ -287,7 +287,7 @@ int SafepointSynchronize::synchronize_threads(jlong safepoint_limit_time, int no iterations++; } while (still_running > 0); - assert(tss_head == NULL, "Must be empty"); + assert(tss_head == nullptr, "Must be empty"); return iterations; } @@ -598,10 +598,10 @@ void SafepointSynchronize::do_cleanup_tasks() { TraceTime timer("safepoint cleanup tasks", TRACETIME_LOG(Info, safepoint, cleanup)); CollectedHeap* heap = Universe::heap(); - assert(heap != NULL, "heap not initialized yet?"); + assert(heap != nullptr, "heap not initialized yet?"); ParallelCleanupTask cleanup; WorkerThreads* cleanup_workers = heap->safepoint_workers(); - if (cleanup_workers != NULL) { + if (cleanup_workers != nullptr) { // Parallel cleanup using GC provided thread pool. cleanup_workers->run_task(&cleanup); } else { @@ -684,7 +684,7 @@ bool SafepointSynchronize::handshake_safe(JavaThread *thread) { // Implementation of Safepoint blocking point void SafepointSynchronize::block(JavaThread *thread) { - assert(thread != NULL, "thread must be set"); + assert(thread != nullptr, "thread must be set"); // Threads shouldn't block if they are in the middle of printing, but... 
ttyLocker::break_tty_lock_for_safepoint(os::current_thread_id()); @@ -805,7 +805,7 @@ void SafepointSynchronize::print_safepoint_timeout() { ThreadSafepointState::ThreadSafepointState(JavaThread *thread) : _at_poll_safepoint(false), _thread(thread), _safepoint_safe(false), - _safepoint_id(SafepointSynchronize::InactiveSafepointCounter), _next(NULL) { + _safepoint_id(SafepointSynchronize::InactiveSafepointCounter), _next(nullptr) { } void ThreadSafepointState::create(JavaThread *thread) { @@ -816,7 +816,7 @@ void ThreadSafepointState::create(JavaThread *thread) { void ThreadSafepointState::destroy(JavaThread *thread) { if (thread->safepoint_state()) { delete(thread->safepoint_state()); - thread->set_safepoint_state(NULL); + thread->set_safepoint_state(nullptr); } } @@ -893,7 +893,7 @@ void ThreadSafepointState::handle_polling_page_exception() { address real_return_addr = self->saved_exception_pc(); CodeBlob *cb = CodeCache::find_blob(real_return_addr); - assert(cb != NULL && cb->is_compiled(), "return address should be in nmethod"); + assert(cb != nullptr && cb->is_compiled(), "return address should be in nmethod"); CompiledMethod* nm = (CompiledMethod*)cb; // Find frame of caller diff --git a/src/hotspot/share/runtime/serviceThread.cpp b/src/hotspot/share/runtime/serviceThread.cpp index 82e9c9897e4ff..07631cb5d64ed 100644 --- a/src/hotspot/share/runtime/serviceThread.cpp +++ b/src/hotspot/share/runtime/serviceThread.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -52,8 +52,8 @@ #include "services/lowMemoryDetector.hpp" #include "services/threadIdTable.hpp" -DEBUG_ONLY(JavaThread* ServiceThread::_instance = NULL;) -JvmtiDeferredEvent* ServiceThread::_jvmti_event = NULL; +DEBUG_ONLY(JavaThread* ServiceThread::_instance = nullptr;) +JvmtiDeferredEvent* ServiceThread::_jvmti_event = nullptr; // The service thread has it's own static deferred event queue. // Events can be posted before JVMTI vm_start, so it's too early to call JvmtiThreadState::state_for // to add this field to the per-JavaThread event queue. TODO: fix this sometime later @@ -151,7 +151,7 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) { if (has_jvmti_events) { _jvmti_event->post(); - _jvmti_event = NULL; // reset + _jvmti_event = nullptr; // reset } if (!UseNotificationThread) { @@ -203,7 +203,7 @@ void ServiceThread::enqueue_deferred_event(JvmtiDeferredEvent* event) { // If you enqueue events before the service thread runs, gc // cannot keep the nmethod alive. This could be restricted to compiled method // load and unload events, if we wanted to be picky. - assert(_instance != NULL, "cannot enqueue events before the service thread runs"); + assert(_instance != nullptr, "cannot enqueue events before the service thread runs"); _jvmti_service_queue.enqueue(*event); Service_lock->notify_all(); } @@ -212,7 +212,7 @@ void ServiceThread::oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf) { JavaThread::oops_do_no_frames(f, cf); // The ServiceThread "owns" the JVMTI Deferred events, scan them here // to keep them alive until they are processed. - if (_jvmti_event != NULL) { + if (_jvmti_event != nullptr) { _jvmti_event->oops_do(f, cf); } // Requires a lock, because threads can be adding to this queue. 
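A side note on why this rename is more than style: NULL is an integer constant (0 or 0L), so it can silently select an integer overload, while nullptr has its own type, std::nullptr_t, which converts only to pointer types. A minimal standalone sketch of the difference (hypothetical names, not code from this patch):

#include <cstdio>

static void dispatch(int)         { std::puts("int overload"); }
static void dispatch(const char*) { std::puts("pointer overload"); }

int main() {
  // With NULL this call may pick the int overload or be rejected as
  // ambiguous, depending on how the platform defines NULL.
  dispatch(0);       // always the int overload
  dispatch(nullptr); // always the pointer overload
  return 0;
}

In the patched comparisons and assignments above, NULL and nullptr generate identical code; the benefit is that misuse of the null value now fails at compile time.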
@@ -222,8 +222,8 @@ void ServiceThread::oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf) { void ServiceThread::nmethods_do(CodeBlobClosure* cf) { JavaThread::nmethods_do(cf); - if (cf != NULL) { - if (_jvmti_event != NULL) { + if (cf != nullptr) { + if (_jvmti_event != nullptr) { _jvmti_event->nmethods_do(cf); } // Requires a lock, because threads can be adding to this queue. diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp index 369790dff3ac3..c563376084bf4 100644 --- a/src/hotspot/share/runtime/sharedRuntime.cpp +++ b/src/hotspot/share/runtime/sharedRuntime.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -559,7 +559,7 @@ address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* curr #if INCLUDE_JVMCI // JVMCI's ExceptionHandlerStub expects the thread local exception PC to be clear // and other exception handler continuations do not read it - current->set_exception_pc(NULL); + current->set_exception_pc(nullptr); #endif // INCLUDE_JVMCI if (Continuation::is_return_barrier_entry(return_address)) { @@ -568,8 +568,8 @@ address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* curr // The fastest case first CodeBlob* blob = CodeCache::find_blob(return_address); - CompiledMethod* nm = (blob != NULL) ? blob->as_compiled_method_or_null() : NULL; - if (nm != NULL) { + CompiledMethod* nm = (blob != nullptr) ? blob->as_compiled_method_or_null() : nullptr; + if (nm != nullptr) { // Set flag if return address is a method handle call site. 
current->set_is_method_handle_return(nm->is_method_handle_return(return_address)); // native nmethods don't have exception handlers @@ -603,7 +603,7 @@ address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* curr // JavaCallWrapper::~JavaCallWrapper return StubRoutines::catch_exception_entry(); } - if (blob != NULL && blob->is_upcall_stub()) { + if (blob != nullptr && blob->is_upcall_stub()) { return ((UpcallStub*)blob)->exception_handler(); } // Interpreted code @@ -613,8 +613,8 @@ address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* curr return Interpreter::rethrow_exception_entry(); } - guarantee(blob == NULL || !blob->is_runtime_stub(), "caller should have skipped stub"); - guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!"); + guarantee(blob == nullptr || !blob->is_runtime_stub(), "caller should have skipped stub"); + guarantee(!VtableStubs::contains(return_address), "null exceptions in vtables should have been handled already!"); #ifndef PRODUCT { ResourceMark rm; @@ -626,7 +626,7 @@ address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* curr #endif // PRODUCT ShouldNotReachHere(); - return NULL; + return nullptr; } @@ -641,7 +641,7 @@ address SharedRuntime::get_poll_stub(address pc) { CodeBlob *cb = CodeCache::find_blob(pc); // Should be an nmethod - guarantee(cb != NULL && cb->is_compiled(), "safepoint polling: pc must refer to an nmethod"); + guarantee(cb != nullptr && cb->is_compiled(), "safepoint polling: pc must refer to an nmethod"); // Look up the relocation information assert(((CompiledMethod*)cb)->is_at_poll_or_poll_return(pc), @@ -658,15 +658,15 @@ address SharedRuntime::get_poll_stub(address pc) { bool at_poll_return = ((CompiledMethod*)cb)->is_at_poll_return(pc); bool has_wide_vectors = ((CompiledMethod*)cb)->has_wide_vectors(); if (at_poll_return) { - assert(SharedRuntime::polling_page_return_handler_blob() != NULL, + assert(SharedRuntime::polling_page_return_handler_blob() != nullptr, "polling page return stub not created yet"); stub = SharedRuntime::polling_page_return_handler_blob()->entry_point(); } else if (has_wide_vectors) { - assert(SharedRuntime::polling_page_vectors_safepoint_handler_blob() != NULL, + assert(SharedRuntime::polling_page_vectors_safepoint_handler_blob() != nullptr, "polling page vectors safepoint stub not created yet"); stub = SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point(); } else { - assert(SharedRuntime::polling_page_safepoint_handler_blob() != NULL, + assert(SharedRuntime::polling_page_safepoint_handler_blob() != nullptr, "polling page safepoint stub not created yet"); stub = SharedRuntime::polling_page_safepoint_handler_blob()->entry_point(); } @@ -691,13 +691,13 @@ void SharedRuntime::throw_and_post_jvmti_exception(JavaThread* current, Handle h methodHandle method = methodHandle(current, vfst.method()); int bci = vfst.bci(); MethodData* trap_mdo = method->method_data(); - if (trap_mdo != NULL) { + if (trap_mdo != nullptr) { // Set exception_seen if the exceptional bytecode is an invoke Bytecode_invoke call = Bytecode_invoke_check(method, bci); if (call.is_valid()) { ResourceMark rm(current); - ProfileData* pdata = trap_mdo->allocate_bci_to_data(bci, NULL); - if (pdata != NULL && pdata->is_BitData()) { + ProfileData* pdata = trap_mdo->allocate_bci_to_data(bci, nullptr); + if (pdata != nullptr && pdata->is_BitData()) { BitData* bit_data = (BitData*) pdata; bit_data->set_exception_seen(); } 
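The same distinction shows up in template argument deduction, another reason a VM-wide substitution pays off: NULL deduces to an integer type and stops behaving like a null pointer, while nullptr deduces to std::nullptr_t. A standalone sketch (invented names, not from this patch):

#include <cstddef>

static void takes_ptr(const int*) {}

template <typename T>
static void forward_to_ptr(T value) {
  // Compiles when T is std::nullptr_t; fails when T is an integer type,
  // because a runtime integer is no longer a null pointer constant.
  takes_ptr(value);
}

int main() {
  forward_to_ptr(nullptr);  // ok: T deduced as std::nullptr_t
  // forward_to_ptr(NULL);  // typically fails: T deduced as int or long
  return 0;
}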
@@ -735,7 +735,7 @@ JRT_END // for given exception address SharedRuntime::compute_compiled_exc_handler(CompiledMethod* cm, address ret_pc, Handle& exception, bool force_unwind, bool top_frame_only, bool& recursive_exception_occurred) { - assert(cm != NULL, "must exist"); + assert(cm != nullptr, "must exist"); ResourceMark rm; #if INCLUDE_JVMCI @@ -744,7 +744,7 @@ address SharedRuntime::compute_compiled_exc_handler(CompiledMethod* cm, address int catch_pco = ret_pc - cm->code_begin(); ExceptionHandlerTable table(cm); HandlerTableEntry *t = table.entry_for(catch_pco, -1, 0); - if (t != NULL) { + if (t != nullptr) { return cm->code_begin() + t->pco(); } else { return Deoptimization::deoptimize_for_missing_exception_handler(cm); @@ -791,12 +791,12 @@ address SharedRuntime::compute_compiled_exc_handler(CompiledMethod* cm, address } if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) { sd = sd->sender(); - if (sd != NULL) { + if (sd != nullptr) { bci = sd->bci(); } ++scope_depth; } - } while (recursive_exception || (!top_frame_only && handler_bci < 0 && sd != NULL)); + } while (recursive_exception || (!top_frame_only && handler_bci < 0 && sd != nullptr)); } // found handling method => lookup exception handler @@ -804,7 +804,7 @@ address SharedRuntime::compute_compiled_exc_handler(CompiledMethod* cm, address ExceptionHandlerTable table(nm); HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth); - if (t == NULL && (nm->is_compiled_by_c1() || handler_bci != -1)) { + if (t == nullptr && (nm->is_compiled_by_c1() || handler_bci != -1)) { // Allow abbreviated catch tables. The idea is to allow a method // to materialize its exceptions without committing to the exact // routing of exceptions. In particular this is needed for adding @@ -815,13 +815,13 @@ address SharedRuntime::compute_compiled_exc_handler(CompiledMethod* cm, address } #ifdef COMPILER1 - if (t == NULL && nm->is_compiled_by_c1()) { - assert(nm->unwind_handler_begin() != NULL, ""); + if (t == nullptr && nm->is_compiled_by_c1()) { + assert(nm->unwind_handler_begin() != nullptr, ""); return nm->unwind_handler_begin(); } #endif - if (t == NULL) { + if (t == nullptr) { ttyLocker ttyl; tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d, catch_pco: %d", p2i(ret_pc), handler_bci, catch_pco); tty->print_cr(" Exception:"); @@ -832,7 +832,7 @@ address SharedRuntime::compute_compiled_exc_handler(CompiledMethod* cm, address nm->print(); nm->print_code(); guarantee(false, "missing exception handler"); - return NULL; + return nullptr; } return nm->code_begin() + t->pco(); @@ -853,13 +853,13 @@ JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* current)) JRT_END JRT_ENTRY(void, SharedRuntime::throw_NullPointerException(JavaThread* current)) - throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), NULL); + throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), nullptr); JRT_END JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* current)) // This entry point is effectively only used for NullPointerExceptions which occur at inline // cache sites (when the callee activation is not yet set up) so we are at a call site - throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), NULL); + throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), nullptr); JRT_END JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* current)) 
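Calls like throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), nullptr) above pass nullptr for an optional C-string message, matching the defaulted parameter in the header. A standalone sketch of that pattern, with invented names:

#include <cstdio>

static void post_exception(const char* name, const char* message = nullptr) {
  if (message != nullptr) {
    std::printf("%s: %s\n", name, message);
  } else {
    std::printf("%s\n", name);
  }
}

int main() {
  post_exception("java.lang.NullPointerException");            // default nullptr
  post_exception("java.lang.NullPointerException", "at call"); // explicit text
  return 0;
}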
@@ -897,7 +897,7 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current, address pc, ImplicitExceptionKind exception_kind) { - address target_pc = NULL; + address target_pc = nullptr; if (Interpreter::contains(pc)) { switch (exception_kind) { @@ -919,7 +919,7 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current, // deoptimization blob and uncommon trap blob bang the stack // in a debug VM to verify the correctness of the compiled // method stack banging. - assert(current->deopt_mark() == NULL, "no stack overflow from deopt blob/uncommon trap"); + assert(current->deopt_mark() == nullptr, "no stack overflow from deopt blob/uncommon trap"); Events::log_exception(current, "StackOverflowError at " INTPTR_FORMAT, p2i(pc)); return StubRoutines::throw_StackOverflowError_entry(); } @@ -932,8 +932,8 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current, // caller-saved registers, as these entry points do. VtableStub* vt_stub = VtableStubs::stub_containing(pc); - // If vt_stub is NULL, then return NULL to signal handler to report the SEGV error. - if (vt_stub == NULL) return NULL; + // If vt_stub is null, then return null to signal handler to report the SEGV error. + if (vt_stub == nullptr) return nullptr; if (vt_stub->is_abstract_method_error(pc)) { assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs"); @@ -952,8 +952,8 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current, } else { CodeBlob* cb = CodeCache::find_blob(pc); - // If code blob is NULL, then return NULL to signal handler to report the SEGV error. - if (cb == NULL) return NULL; + // If code blob is null, then return null to signal handler to report the SEGV error. + if (cb == nullptr) return nullptr; // Exception happened in CodeCache. Must be either: // 1. Inline-cache check in C2I handler blob, @@ -964,7 +964,7 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current, bool is_in_blob = cb->is_adapter_blob() || cb->is_method_handles_adapter_blob(); if (!is_in_blob) { // Allow normal crash reporting to handle this - return NULL; + return nullptr; } Events::log_exception(current, "NullPointerException in code blob at " INTPTR_FORMAT, p2i(pc)); // There is no handler here, so we will simply unwind. @@ -992,7 +992,7 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current, _implicit_null_throws++; #endif target_pc = cm->continuation_for_implicit_null_exception(pc); - // If there's an unexpected fault, target_pc might be NULL, + // If there's an unexpected fault, target_pc might be null, // in which case we want to fall through into the normal // error handling code. } @@ -1003,12 +1003,12 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current, case IMPLICIT_DIVIDE_BY_ZERO: { CompiledMethod* cm = CodeCache::find_compiled(pc); - guarantee(cm != NULL, "must have containing compiled method for implicit division-by-zero exceptions"); + guarantee(cm != nullptr, "must have containing compiled method for implicit division-by-zero exceptions"); #ifndef PRODUCT _implicit_div0_throws++; #endif target_pc = cm->continuation_for_implicit_div0_exception(pc); - // If there's an unexpected fault, target_pc might be NULL, + // If there's an unexpected fault, target_pc might be null, // in which case we want to fall through into the normal // error handling code. 
break; // fall through @@ -1036,7 +1036,7 @@ address SharedRuntime::continuation_for_implicit_exception(JavaThread* current, } ShouldNotReachHere(); - return NULL; + return nullptr; } @@ -1076,14 +1076,14 @@ JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* current, JRT_END jlong SharedRuntime::get_java_tid(JavaThread* thread) { - assert(thread != NULL, "No thread"); - if (thread == NULL) { + assert(thread != nullptr, "No thread"); + if (thread == nullptr) { return 0; } guarantee(Thread::current() != thread || thread->is_oop_safe(), "current cannot touch oops after its GC barrier is detached."); oop obj = thread->threadObj(); - return (obj == NULL) ? 0 : java_lang_Thread::thread_id(obj); + return (obj == nullptr) ? 0 : java_lang_Thread::thread_id(obj); } /** @@ -1163,7 +1163,7 @@ Method* SharedRuntime::extract_attached_method(vframeStream& vfst) { CompiledICLocker ic_locker(caller); return caller->attached_method_before_pc(pc); } - return NULL; + return nullptr; } // Finds receiver, CallInfo (i.e. receiver method), and calling bytecode @@ -1245,7 +1245,7 @@ Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Cod if (attached_method.is_null()) { Method* callee = bytecode.static_target(CHECK_NH); - if (callee == NULL) { + if (callee == nullptr) { THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle); } } @@ -1274,7 +1274,7 @@ Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Cod if (has_receiver) { assert(receiver.not_null(), "should have thrown exception"); Klass* receiver_klass = receiver->klass(); - Klass* rk = NULL; + Klass* rk = nullptr; if (attached_method.not_null()) { // In case there's resolved method attached, use its holder during the check. rk = attached_method->method_holder(); @@ -1368,16 +1368,16 @@ bool SharedRuntime::resolve_sub_helper_internal(methodHandle callee_method, cons // we are done patching the code. CompiledMethod* callee = callee_method->code(); - if (callee != NULL) { + if (callee != nullptr) { assert(callee->is_compiled(), "must be nmethod for patching"); } - if (callee != NULL && !callee->is_in_use()) { + if (callee != nullptr && !callee->is_in_use()) { // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded. - callee = NULL; + callee = nullptr; } #ifdef ASSERT - address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below + address dest_entry_point = callee == nullptr ? 0 : callee->entry_point(); // used below #endif bool is_nmethod = caller_nm->is_nmethod(); @@ -1385,7 +1385,7 @@ bool SharedRuntime::resolve_sub_helper_internal(methodHandle callee_method, cons if (is_virtual) { assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check"); bool static_bound = call_info.resolved_method()->can_be_statically_bound(); - Klass* klass = invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass(); + Klass* klass = invoke_code == Bytecodes::_invokehandle ? nullptr : receiver->klass(); CompiledIC::compute_monomorphic_entry(callee_method, klass, is_optimized, static_bound, is_nmethod, virtual_call_info, CHECK_false); @@ -1407,13 +1407,13 @@ bool SharedRuntime::resolve_sub_helper_internal(methodHandle callee_method, cons // which may happen when multiply alive nmethod (tiered compilation) // will be supported. 
if (!callee_method->is_old() && - (callee == NULL || (callee->is_in_use() && callee_method->code() == callee))) { + (callee == nullptr || (callee->is_in_use() && callee_method->code() == callee))) { NoSafepointVerifier nsv; #ifdef ASSERT // We must not try to patch to jump to an already unloaded method. if (dest_entry_point != 0) { CodeBlob* cb = CodeCache::find_blob(dest_entry_point); - assert((cb != NULL) && cb->is_compiled() && (((CompiledMethod*)cb) == callee), + assert((cb != nullptr) && cb->is_compiled() && (((CompiledMethod*)cb) == callee), "should not call unloaded nmethod"); } #endif @@ -1428,7 +1428,7 @@ bool SharedRuntime::resolve_sub_helper_internal(methodHandle callee_method, cons if (VM_Version::supports_fast_class_init_checks() && invoke_code == Bytecodes::_invokestatic && callee_method->needs_clinit_barrier() && - callee != NULL && callee->is_compiled_by_jvmci()) { + callee != nullptr && callee->is_compiled_by_jvmci()) { return true; // skip patching for JVMCI } CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc()); @@ -1454,12 +1454,12 @@ methodHandle SharedRuntime::resolve_sub_helper(bool is_virtual, bool is_optimize frame caller_frame = current->last_frame().sender(&cbl_map); CodeBlob* caller_cb = caller_frame.cb(); - guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method"); + guarantee(caller_cb != nullptr && caller_cb->is_compiled(), "must be called from compiled method"); CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null(); // determine call info & receiver - // note: a) receiver is NULL for static calls - // b) an exception is thrown if receiver is NULL for non-static calls + // note: a) receiver is null for static calls + // b) an exception is thrown if receiver is null for non-static calls CallInfo call_info; Bytecodes::Code invoke_code = Bytecodes::_illegal; Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle())); @@ -1560,7 +1560,7 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current->set_vm_result_2(callee_method()); JRT_BLOCK_END // return compiled code entry point after potential safepoints - assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); + assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!"); return callee_method->verified_code_entry(); JRT_END @@ -1588,9 +1588,9 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current) caller_frame.is_entry_frame() || caller_frame.is_upcall_stub_frame()) { Method* callee = current->callee_target(); - guarantee(callee != NULL && callee->is_method(), "bad handshake"); + guarantee(callee != nullptr && callee->is_method(), "bad handshake"); current->set_vm_result_2(callee); - current->set_callee_target(NULL); + current->set_callee_target(nullptr); if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) { // Bypass class initialization checks in c2i when caller is in native. // JNI calls to static methods don't have class initialization checks. 
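One line in the hunk above is worth a second look: dest_entry_point is computed as callee == nullptr ? 0 : callee->entry_point(), where the literal 0 still acts as a null pointer constant inside the conditional expression. A standalone sketch of why both spellings compile (illustrative only):

#include <cstdio>

int main() {
  int value = 42;
  int* maybe  = true  ? &value : nullptr; // common type of the operands: int*
  int* legacy = false ? &value : 0;       // literal 0 also converts to int* here
  std::printf("%p %p\n", static_cast<void*>(maybe), static_cast<void*>(legacy));
  return 0;
}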
@@ -1614,7 +1614,7 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current) current->set_vm_result_2(callee_method()); JRT_BLOCK_END // return compiled code entry point after potential safepoints - assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); + assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!"); return callee_method->verified_code_entry(); JRT_END @@ -1644,7 +1644,7 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* methodHandle callee(current, invoke.static_target(current)); if (!callee.is_null()) { oop recv = callerFrame.retrieve_receiver(®_map); - Klass *recv_klass = (recv != NULL) ? recv->klass() : NULL; + Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr; res = StubRoutines::forward_exception_entry(); LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res)); } @@ -1669,7 +1669,7 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* curren frame stub_frame = current->last_frame(); assert(stub_frame.is_runtime_frame(), "must be a runtimeStub"); frame caller = stub_frame.sender(®_map); - enter_special = caller.cb() != NULL && caller.cb()->is_compiled() + enter_special = caller.cb() != nullptr && caller.cb()->is_compiled() && caller.cb()->as_compiled_method()->method()->is_continuation_enter_intrinsic(); } JRT_BLOCK_END @@ -1686,7 +1686,7 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* curren } // return compiled code entry point after potential safepoints - assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); + assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!"); return callee_method->verified_code_entry(); JRT_END @@ -1699,7 +1699,7 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* curre current->set_vm_result_2(callee_method()); JRT_BLOCK_END // return compiled code entry point after potential safepoints - assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); + assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!"); return callee_method->verified_code_entry(); JRT_END @@ -1713,7 +1713,7 @@ JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* c current->set_vm_result_2(callee_method()); JRT_BLOCK_END // return compiled code entry point after potential safepoints - assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); + assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!"); return callee_method->verified_code_entry(); JRT_END @@ -1739,7 +1739,7 @@ bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMeth should_be_mono = true; } else if (inline_cache->is_icholder_call()) { CompiledICHolder* ic_oop = inline_cache->cached_icholder(); - if (ic_oop != NULL) { + if (ic_oop != nullptr) { if (!ic_oop->is_loader_alive()) { // Deferred IC cleaning due to concurrent class unloading if (!inline_cache->set_to_clean()) { @@ -1750,7 +1750,7 @@ bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMeth // This isn't a real miss. We must have seen that compiled code // is now available and we want the call site converted to a // monomorphic compiled call site. 
- // We can't assert for callee_method->code() != NULL because it + // We can't assert for callee_method->code() != nullptr because it // could have been deoptimized in the meantime if (TraceCallFixup) { ResourceMark rm(THREAD); @@ -1803,7 +1803,7 @@ methodHandle SharedRuntime::handle_ic_miss_helper(TRAPS) { CallInfo call_info; Bytecodes::Code bc; - // receiver is NULL for static calls. An exception is thrown for NULL + // receiver is null for static calls. An exception is thrown for null // receivers for non-static calls Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle())); // Compiler1 can produce virtual call sites that can actually be statically bound @@ -1954,7 +1954,7 @@ methodHandle SharedRuntime::reresolve_call_site(TRAPS) { // we jump to it the target gets deoptimized. Similar to 1 // we will wind up in the interprter (thru a c2i with c2). // - address call_addr = NULL; + address call_addr = nullptr; { // Get call instruction under lock because another thread may be // busy patching it. @@ -1965,7 +1965,7 @@ methodHandle SharedRuntime::reresolve_call_site(TRAPS) { // Check relocations for the matching call to 1) avoid false positives, // and 2) determine the type. - if (call_addr != NULL) { + if (call_addr != nullptr) { // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5 // bytes back in the instruction stream so we must also check for reloc info. RelocIterator iter(caller_nm, call_addr, call_addr+1); @@ -2061,7 +2061,7 @@ bool SharedRuntime::should_fixup_call_destination(address destination, address e if (destination != entry_point) { CodeBlob* callee = CodeCache::find_blob(destination); // callee == cb seems weird. It means calling interpreter thru stub. - if (callee != NULL && (callee == cb || callee->is_adapter_blob())) { + if (callee != nullptr && (callee == cb || callee->is_adapter_blob())) { // static call or optimized virtual if (TraceCallFixup) { tty->print("fixup callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc)); @@ -2113,12 +2113,12 @@ JRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address cal NoSafepointVerifier nsv; CompiledMethod* callee = moop->code(); - if (callee == NULL) { + if (callee == nullptr) { return; } CodeBlob* cb = CodeCache::find_blob(caller_pc); - if (cb == NULL || !cb->is_compiled() || callee->is_unloading()) { + if (cb == nullptr || !cb->is_compiled() || callee->is_unloading()) { return; } @@ -2135,13 +2135,13 @@ JRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address cal // There is a benign race here. We could be attempting to patch to a compiled // entry point at the same time the callee is being deoptimized. If that is // the case then entry_point may in fact point to a c2i and we'd patch the - // call site with the same old data. clear_code will set code() to NULL - // at the end of it. If we happen to see that NULL then we can skip trying + // call site with the same old data. clear_code will set code() to null + // at the end of it. If we happen to see that null then we can skip trying // to patch. If we hit the window where the callee has a c2i in the - // from_compiled_entry and the NULL isn't present yet then we lose the race + // from_compiled_entry and the null isn't present yet then we lose the race // and patch the code with the same old data. Asi es la vida. 
- if (moop->code() == NULL) return; + if (moop->code() == nullptr) return; if (nm->is_in_use()) { // Expect to find a native call there (unless it was no-inline cache vtable dispatch) @@ -2194,7 +2194,7 @@ JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos, _slow_array_copy_ctr++; #endif // Check if we have null pointers - if (src == NULL || dest == NULL) { + if (src == nullptr || dest == nullptr) { THROW(vmSymbols::java_lang_NullPointerException()); } // Do the copy. The casts to arrayOop are necessary to the copy_array API, @@ -2220,8 +2220,8 @@ char* SharedRuntime::generate_class_cast_message( Bytecode_checkcast cc(vfst.method(), vfst.method()->bcp_from(vfst.bci())); constantPoolHandle cpool(thread, vfst.method()->constants()); Klass* target_klass = ConstantPool::klass_at_if_loaded(cpool, cc.index()); - Symbol* target_klass_name = NULL; - if (target_klass == NULL) { + Symbol* target_klass_name = nullptr; + if (target_klass == nullptr) { // This klass should be resolved, but just in case, get the name in the klass slot. target_klass_name = cpool->klass_name_at(cc.index()); } @@ -2235,8 +2235,8 @@ char* SharedRuntime::generate_class_cast_message( Klass* caster_klass, Klass* target_klass, Symbol* target_klass_name) { const char* caster_name = caster_klass->external_name(); - assert(target_klass != NULL || target_klass_name != NULL, "one must be provided"); - const char* target_name = target_klass == NULL ? target_klass_name->as_klass_external_name() : + assert(target_klass != nullptr || target_klass_name != nullptr, "one must be provided"); + const char* target_name = target_klass == nullptr ? target_klass_name->as_klass_external_name() : target_klass->external_name(); size_t msglen = strlen(caster_name) + strlen("class ") + strlen(" cannot be cast to class ") + strlen(target_name) + 1; @@ -2244,19 +2244,19 @@ char* SharedRuntime::generate_class_cast_message( const char* caster_klass_description = ""; const char* target_klass_description = ""; const char* klass_separator = ""; - if (target_klass != NULL && caster_klass->module() == target_klass->module()) { + if (target_klass != nullptr && caster_klass->module() == target_klass->module()) { caster_klass_description = caster_klass->joint_in_module_of_loader(target_klass); } else { caster_klass_description = caster_klass->class_in_module_of_loader(); - target_klass_description = (target_klass != NULL) ? target_klass->class_in_module_of_loader() : ""; - klass_separator = (target_klass != NULL) ? "; " : ""; + target_klass_description = (target_klass != nullptr) ? target_klass->class_in_module_of_loader() : ""; + klass_separator = (target_klass != nullptr) ? 
"; " : ""; } // add 3 for parenthesis and preceding space msglen += strlen(caster_klass_description) + strlen(target_klass_description) + strlen(klass_separator) + 3; char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen); - if (message == NULL) { + if (message == nullptr) { // Shouldn't happen, but don't cause even more problems if it does message = const_cast(caster_klass->external_name()); } else { @@ -2326,7 +2326,7 @@ JRT_END void SharedRuntime::print_statistics() { ttyLocker ttyl; - if (xtty != NULL) xtty->head("statistics type='SharedRuntime'"); + if (xtty != nullptr) xtty->head("statistics type='SharedRuntime'"); SharedRuntime::print_ic_miss_histogram(); @@ -2363,7 +2363,7 @@ void SharedRuntime::print_statistics() { AdapterHandlerLibrary::print_statistics(); - if (xtty != NULL) xtty->tail("statistics"); + if (xtty != nullptr) xtty->tail("statistics"); } inline double percent(int64_t x, int64_t y) { @@ -2382,8 +2382,8 @@ class MethodArityHistogram { static int _max_size; // max. arg size seen static void add_method_to_histogram(nmethod* nm) { - Method* method = (nm == NULL) ? NULL : nm->method(); - if (method != NULL) { + Method* method = (nm == nullptr) ? nullptr : nm->method(); + if (method != nullptr) { ArgumentCount args(method->signature()); int arity = args.size() + (method->is_static() ? 0 : 1); int argsize = method->size_of_parameters(); @@ -2717,14 +2717,14 @@ static void print_table_statistics() { // --------------------------------------------------------------------------- // Implementation of AdapterHandlerLibrary -AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = NULL; -AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = NULL; -AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = NULL; -AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = NULL; -AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = NULL; -AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = NULL; +AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = nullptr; +AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr; +AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr; +AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr; +AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr; +AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr; const int AdapterHandlerLibrary_size = 16*K; -BufferBlob* AdapterHandlerLibrary::_buffer = NULL; +BufferBlob* AdapterHandlerLibrary::_buffer = nullptr; BufferBlob* AdapterHandlerLibrary::buffer_blob() { return _buffer; @@ -2751,11 +2751,11 @@ static void post_adapter_creation(const AdapterBlob* new_adapter, void AdapterHandlerLibrary::initialize() { ResourceMark rm; - AdapterBlob* no_arg_blob = NULL; - AdapterBlob* int_arg_blob = NULL; - AdapterBlob* obj_arg_blob = NULL; - AdapterBlob* obj_int_arg_blob = NULL; - AdapterBlob* obj_obj_arg_blob = NULL; + AdapterBlob* no_arg_blob = nullptr; + AdapterBlob* int_arg_blob = nullptr; + AdapterBlob* obj_arg_blob = nullptr; + AdapterBlob* obj_int_arg_blob = nullptr; + AdapterBlob* obj_obj_arg_blob = nullptr; { MutexLocker mu(AdapterHandlerLibrary_lock); @@ -2765,12 +2765,12 @@ void AdapterHandlerLibrary::initialize() { // Pass wrong_method_abstract for the c2i transitions to return // AbstractMethodError for invalid invocations. 
address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub(); - _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, NULL), + _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, nullptr), StubRoutines::throw_AbstractMethodError_entry(), wrong_method_abstract, wrong_method_abstract); _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size); - _no_arg_handler = create_adapter(no_arg_blob, 0, NULL, true); + _no_arg_handler = create_adapter(no_arg_blob, 0, nullptr, true); BasicType obj_args[] = { T_OBJECT }; _obj_arg_handler = create_adapter(obj_arg_blob, 1, obj_args, true); @@ -2784,11 +2784,11 @@ void AdapterHandlerLibrary::initialize() { BasicType obj_obj_args[] = { T_OBJECT, T_OBJECT }; _obj_obj_arg_handler = create_adapter(obj_obj_arg_blob, 2, obj_obj_args, true); - assert(no_arg_blob != NULL && - obj_arg_blob != NULL && - int_arg_blob != NULL && - obj_int_arg_blob != NULL && - obj_obj_arg_blob != NULL, "Initial adapters must be properly created"); + assert(no_arg_blob != nullptr && + obj_arg_blob != nullptr && + int_arg_blob != nullptr && + obj_int_arg_blob != nullptr && + obj_obj_arg_blob != nullptr, "Initial adapters must be properly created"); } // Outside of the lock @@ -2845,7 +2845,7 @@ AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandl return _obj_int_arg_handler; } } - return NULL; + return nullptr; } class AdapterSignatureIterator : public SignatureIterator { @@ -2898,12 +2898,12 @@ AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& meth // Fast-path for trivial adapters AdapterHandlerEntry* entry = get_simple_adapter(method); - if (entry != NULL) { + if (entry != nullptr) { return entry; } ResourceMark rm; - AdapterBlob* new_adapter = NULL; + AdapterBlob* new_adapter = nullptr; // Fill in the signature array, for the calling-convention call. int total_args_passed = method->size_of_parameters(); // All args on stack @@ -2918,12 +2918,12 @@ AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& meth // Lookup method signature's fingerprint entry = lookup(total_args_passed, sig_bt); - if (entry != NULL) { + if (entry != nullptr) { #ifdef ASSERT if (VerifyAdapterSharing) { - AdapterBlob* comparison_blob = NULL; + AdapterBlob* comparison_blob = nullptr; AdapterHandlerEntry* comparison_entry = create_adapter(comparison_blob, total_args_passed, sig_bt, false); - assert(comparison_blob == NULL, "no blob should be created when creating an adapter for comparison"); + assert(comparison_blob == nullptr, "no blob should be created when creating an adapter for comparison"); assert(comparison_entry->compare_code(entry), "code must match"); // Release the one just created and return the original delete comparison_entry; @@ -2936,7 +2936,7 @@ AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& meth } // Outside of the lock - if (new_adapter != NULL) { + if (new_adapter != nullptr) { post_adapter_creation(new_adapter, entry); } return entry; @@ -2951,7 +2951,7 @@ AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(AdapterBlob*& new_ada // VerifyAdapterCalls and VerifyAdapterSharing can fail if we re-use code that generated // prior to StubRoutines::code2() being set. Checks refer to checks generated in an I2C // stub that ensure that an I2C stub is called from an interpreter frame. 
- bool contains_all_checks = StubRoutines::code2() != NULL; + bool contains_all_checks = StubRoutines::code2() != nullptr; VMRegPair stack_regs[16]; VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed); @@ -2985,11 +2985,11 @@ AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(AdapterBlob*& new_ada new_adapter = AdapterBlob::create(&buffer); NOT_PRODUCT(int insts_size = buffer.insts_size()); - if (new_adapter == NULL) { + if (new_adapter == nullptr) { // CodeCache is full, disable compilation // Ought to log this but compile log is only per compile thread // and we're some non descript Java thread. - return NULL; + return nullptr; } entry->relocate(new_adapter->content_begin()); #ifndef PRODUCT @@ -3003,7 +3003,7 @@ AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(AdapterBlob*& new_ada tty->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(entry->get_c2i_entry())); if (Verbose || PrintStubCode) { address first_pc = entry->base_address(); - if (first_pc != NULL) { + if (first_pc != nullptr) { Disassembler::decode(first_pc, first_pc + insts_size, tty NOT_PRODUCT(COMMA &new_adapter->asm_remarks())); tty->cr(); @@ -3023,24 +3023,24 @@ AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(AdapterBlob*& new_ada address AdapterHandlerEntry::base_address() { address base = _i2c_entry; - if (base == NULL) base = _c2i_entry; - assert(base <= _c2i_entry || _c2i_entry == NULL, ""); - assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == NULL, ""); - assert(base <= _c2i_no_clinit_check_entry || _c2i_no_clinit_check_entry == NULL, ""); + if (base == nullptr) base = _c2i_entry; + assert(base <= _c2i_entry || _c2i_entry == nullptr, ""); + assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == nullptr, ""); + assert(base <= _c2i_no_clinit_check_entry || _c2i_no_clinit_check_entry == nullptr, ""); return base; } void AdapterHandlerEntry::relocate(address new_base) { address old_base = base_address(); - assert(old_base != NULL, ""); + assert(old_base != nullptr, ""); ptrdiff_t delta = new_base - old_base; - if (_i2c_entry != NULL) + if (_i2c_entry != nullptr) _i2c_entry += delta; - if (_c2i_entry != NULL) + if (_c2i_entry != nullptr) _c2i_entry += delta; - if (_c2i_unverified_entry != NULL) + if (_c2i_unverified_entry != nullptr) _c2i_unverified_entry += delta; - if (_c2i_no_clinit_check_entry != NULL) + if (_c2i_no_clinit_check_entry != nullptr) _c2i_no_clinit_check_entry += delta; assert(base_address() == new_base, ""); } @@ -3066,7 +3066,7 @@ void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) { bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) { - assert(_saved_code != NULL && other->_saved_code != NULL, "code not saved"); + assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved"); if (other->_saved_code_length != _saved_code_length) { return false; @@ -3085,7 +3085,7 @@ bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) { */ void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) { ResourceMark rm; - nmethod* nm = NULL; + nmethod* nm = nullptr; // Check if memory should be freed before allocation CodeCache::gc_on_allocation(); @@ -3098,7 +3098,7 @@ void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) { // Perform the work while holding the lock, but perform any printing outside the lock MutexLocker mu(AdapterHandlerLibrary_lock); // See if somebody beat us to it - if 
(method->code() != NULL) { + if (method->code() != nullptr) { return; } @@ -3108,7 +3108,7 @@ void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) { ResourceMark rm; BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache - if (buf != NULL) { + if (buf != nullptr) { CodeBuffer buffer(buf); if (method->is_continuation_enter_intrinsic()) { @@ -3149,7 +3149,7 @@ void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) { // Generate the compiled-to-native wrapper code nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type); - if (nm != NULL) { + if (nm != nullptr) { { MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag); if (nm->make_in_use()) { @@ -3168,7 +3168,7 @@ void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) { // Install the generated code. - if (nm != NULL) { + if (nm != nullptr) { const char *msg = method->is_static() ? "(static)" : ""; CompileTask::print_ul(nm, msg); if (PrintCompilation) { @@ -3286,7 +3286,7 @@ JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *current) ) for (BasicObjectLock *kptr = fr.interpreter_frame_monitor_end(); kptr < fr.interpreter_frame_monitor_begin(); kptr = fr.next_monitor_in_interpreter_frame(kptr) ) { - if (kptr->obj() != NULL) active_monitor_count++; + if (kptr->obj() != nullptr) active_monitor_count++; } // QQQ we could place number of active monitors in the array so that compiled code @@ -3310,7 +3310,7 @@ JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *current) ) for (BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end(); kptr2 < fr.interpreter_frame_monitor_begin(); kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) { - if (kptr2->obj() != NULL) { // Avoid 'holes' in the monitor array + if (kptr2->obj() != nullptr) { // Avoid 'holes' in the monitor array BasicLock *lock = kptr2->lock(); // Inflate so the object's header no longer refers to the BasicLock. 
if (lock->displaced_header().is_unlocked()) { @@ -3372,16 +3372,16 @@ void AdapterHandlerLibrary::print_handler_on(outputStream* st, const CodeBlob* b void AdapterHandlerEntry::print_adapter_on(outputStream* st) const { st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string()); - if (get_i2c_entry() != NULL) { + if (get_i2c_entry() != nullptr) { st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry())); } - if (get_c2i_entry() != NULL) { + if (get_c2i_entry() != nullptr) { st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry())); } - if (get_c2i_unverified_entry() != NULL) { + if (get_c2i_unverified_entry() != nullptr) { st->print(" c2iUV: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry())); } - if (get_c2i_no_clinit_check_entry() != NULL) { + if (get_c2i_no_clinit_check_entry() != nullptr) { st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry())); } st->cr(); @@ -3405,7 +3405,7 @@ JRT_END frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* current, frame fr) { ResourceMark rm(current); frame activation; - CompiledMethod* nm = NULL; + CompiledMethod* nm = nullptr; int count = 1; assert(fr.is_java_frame(), "Must start on Java frame"); @@ -3419,23 +3419,23 @@ frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* curren continue; } - Method* method = NULL; + Method* method = nullptr; bool found = false; if (fr.is_interpreted_frame()) { method = fr.interpreter_frame_method(); - if (method != NULL && method->has_reserved_stack_access()) { + if (method != nullptr && method->has_reserved_stack_access()) { found = true; } } else { CodeBlob* cb = fr.cb(); - if (cb != NULL && cb->is_compiled()) { + if (cb != nullptr && cb->is_compiled()) { nm = cb->as_compiled_method(); method = nm->method(); // scope_desc_near() must be used, instead of scope_desc_at() because on // SPARC, the pcDesc can be on the delay slot after the call instruction. - for (ScopeDesc *sd = nm->scope_desc_near(fr.pc()); sd != NULL; sd = sd->sender()) { + for (ScopeDesc *sd = nm->scope_desc_near(fr.pc()); sd != nullptr; sd = sd->sender()) { method = sd->method(); - if (method != NULL && method->has_reserved_stack_access()) { + if (method != nullptr && method->has_reserved_stack_access()) { found = true; } } @@ -3463,7 +3463,7 @@ void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) { // GC may take any compensating steps. oop new_obj = current->vm_result(); - if (new_obj == NULL) return; + if (new_obj == nullptr) return; BarrierSet *bs = BarrierSet::barrier_set(); bs->on_slowpath_allocation_exit(current, new_obj); diff --git a/src/hotspot/share/runtime/sharedRuntime.hpp b/src/hotspot/share/runtime/sharedRuntime.hpp index 7aedab8da0b4d..83685abefa0c5 100644 --- a/src/hotspot/share/runtime/sharedRuntime.hpp +++ b/src/hotspot/share/runtime/sharedRuntime.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -216,17 +216,17 @@ class SharedRuntime: AllStatic { static address get_poll_stub(address pc); static address get_ic_miss_stub() { - assert(_ic_miss_blob!= NULL, "oops"); + assert(_ic_miss_blob!= nullptr, "oops"); return _ic_miss_blob->entry_point(); } static address get_handle_wrong_method_stub() { - assert(_wrong_method_blob!= NULL, "oops"); + assert(_wrong_method_blob!= nullptr, "oops"); return _wrong_method_blob->entry_point(); } static address get_handle_wrong_method_abstract_stub() { - assert(_wrong_method_abstract_blob!= NULL, "oops"); + assert(_wrong_method_abstract_blob!= nullptr, "oops"); return _wrong_method_abstract_blob->entry_point(); } @@ -236,15 +236,15 @@ class SharedRuntime: AllStatic { #endif // COMPILER2 static address get_resolve_opt_virtual_call_stub() { - assert(_resolve_opt_virtual_call_blob != NULL, "oops"); + assert(_resolve_opt_virtual_call_blob != nullptr, "oops"); return _resolve_opt_virtual_call_blob->entry_point(); } static address get_resolve_virtual_call_stub() { - assert(_resolve_virtual_call_blob != NULL, "oops"); + assert(_resolve_virtual_call_blob != nullptr, "oops"); return _resolve_virtual_call_blob->entry_point(); } static address get_resolve_static_call_stub() { - assert(_resolve_static_call_blob != NULL, "oops"); + assert(_resolve_static_call_blob != nullptr, "oops"); return _resolve_static_call_blob->entry_point(); } @@ -264,7 +264,7 @@ class SharedRuntime: AllStatic { // Helper routine for full-speed JVMTI exception throwing support static void throw_and_post_jvmti_exception(JavaThread* current, Handle h_exception); - static void throw_and_post_jvmti_exception(JavaThread* current, Symbol* name, const char *message = NULL); + static void throw_and_post_jvmti_exception(JavaThread* current, Symbol* name, const char *message = nullptr); // RedefineClasses() tracing support for obsolete method entry static int rc_trace_method_entry(JavaThread* thread, Method* m); @@ -315,7 +315,7 @@ class SharedRuntime: AllStatic { // The caller (or one of it's callers) must use a ResourceMark // in order to correctly free the result. // - static char* generate_class_cast_message(Klass* caster_klass, Klass* target_klass, Symbol* target_klass_name = NULL); + static char* generate_class_cast_message(Klass* caster_klass, Klass* target_klass, Symbol* target_klass_name = nullptr); // Resolves a call site- may patch in the destination of the call into the // compiled code. @@ -379,7 +379,7 @@ class SharedRuntime: AllStatic { // Some architectures require that an argument must be passed in a register // AND in a stack slot. These architectures provide a second VMRegPair array // to be filled by the c_calling_convention method. On other architectures, - // NULL is being passed as the second VMRegPair array, so arguments are either + // null is being passed as the second VMRegPair array, so arguments are either // passed in a register OR in a stack slot. 
static int c_calling_convention(const BasicType *sig_bt, VMRegPair *regs, VMRegPair *regs2, int total_args_passed); @@ -686,7 +686,7 @@ class AdapterHandlerLibrary: public AllStatic { address i2c_entry, address c2i_entry, address c2i_unverified_entry, - address c2i_no_clinit_check_entry = NULL); + address c2i_no_clinit_check_entry = nullptr); static void create_native_wrapper(const methodHandle& method); static AdapterHandlerEntry* get_adapter(const methodHandle& method); diff --git a/src/hotspot/share/runtime/signature.cpp b/src/hotspot/share/runtime/signature.cpp index 6beac4b366b52..30eae826c9fe6 100644 --- a/src/hotspot/share/runtime/signature.cpp +++ b/src/hotspot/share/runtime/signature.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -142,7 +142,7 @@ static int compute_num_stack_arg_slots(Symbol* signature, int sizeargs, bool is_ void Fingerprinter::compute_fingerprint_and_return_type(bool static_flag) { // See if we fingerprinted this method already - if (_method != NULL) { + if (_method != nullptr) { assert(!static_flag, "must not be passed by caller"); static_flag = _method->is_static(); _fingerprint = _method->constMethod()->fingerprint(); @@ -189,7 +189,7 @@ void Fingerprinter::compute_fingerprint_and_return_type(bool static_flag) { #endif // Detect overflow. (We counted _param_size correctly.) - if (_method == NULL && _param_size > fp_max_size_of_parameters) { + if (_method == nullptr && _param_size > fp_max_size_of_parameters) { // We did a one-pass computation of argument size, return type, // and fingerprint. _fingerprint = overflow_fingerprint(); @@ -206,7 +206,7 @@ void Fingerprinter::compute_fingerprint_and_return_type(bool static_flag) { _fingerprint = _accumulator; // Cache the result on the method itself: - if (_method != NULL) { + if (_method != nullptr) { _method->constMethod()->set_fingerprint(_fingerprint); } } @@ -304,22 +304,22 @@ SignatureStream::SignatureStream(const Symbol* signature, _array_prefix = 0; // just for definiteness // assigning java/lang/Object to _previous_name means we can - // avoid a number of NULL checks in the parser + // avoid a number of null checks in the parser _previous_name = vmSymbols::java_lang_Object(); - _names = NULL; + _names = nullptr; next(); } SignatureStream::~SignatureStream() { if (_previous_name == vmSymbols::java_lang_Object()) { // no names were created - assert(_names == NULL, "_names unexpectedly created"); + assert(_names == nullptr, "_names unexpectedly created"); return; } // decrement refcount for names created during signature parsing _previous_name->decrement_refcount(); - if (_names != NULL) { + if (_names != nullptr) { for (int i = 0; i < _names->length(); i++) { _names->at(i)->decrement_refcount(); } @@ -334,7 +334,7 @@ inline int SignatureStream::scan_type(BasicType type) { switch (type) { case T_OBJECT: tem = (const u1*) memchr(&base[end], JVM_SIGNATURE_ENDCLASS, limit - end); - return (tem == NULL ? limit : tem + 1 - base); + return (tem == nullptr ? 
limit : tem + 1 - base); case T_ARRAY: while ((end < limit) && ((char)base[end] == JVM_SIGNATURE_ARRAY)) { end++; } @@ -346,7 +346,7 @@ inline int SignatureStream::scan_type(BasicType type) { _array_prefix = end - _end; // number of '[' chars just skipped if (Signature::has_envelope(base[end])) { tem = (const u1 *) memchr(&base[end], JVM_SIGNATURE_ENDCLASS, limit - end); - return (tem == NULL ? limit : tem + 1 - base); + return (tem == nullptr ? limit : tem + 1 - base); } // Skipping over a single character for a primitive type. assert(is_java_primitive(decode_signature_char(base[end])), "only primitives expected"); @@ -489,7 +489,7 @@ Symbol* SignatureStream::find_symbol() { // Only allocate the GrowableArray for the _names buffer if more than // one name is being processed in the signature. if (!_previous_name->is_permanent()) { - if (_names == NULL) { + if (_names == nullptr) { _names = new GrowableArray<Symbol*>(10); } _names->push(_previous_name); @@ -501,12 +501,12 @@ Klass* SignatureStream::as_klass(Handle class_loader, Handle protection_domain, FailureMode failure_mode, TRAPS) { if (!is_reference()) { - return NULL; + return nullptr; } Symbol* name = as_symbol(); - Klass* k = NULL; + Klass* k = nullptr; if (failure_mode == ReturnNull) { - // Note: SD::resolve_or_null returns NULL for most failure modes, + // Note: SD::resolve_or_null returns null for most failure modes, // but not all. Circularity errors, invalid PDs, etc., throw. k = SystemDictionary::resolve_or_null(name, class_loader, protection_domain, CHECK_NULL); } else if (failure_mode == CachedOrNull) { @@ -534,8 +534,8 @@ oop SignatureStream::as_java_mirror(Handle class_loader, Handle protection_domai return Universe::java_mirror(type()); } Klass* klass = as_klass(class_loader, protection_domain, failure_mode, CHECK_NULL); - if (klass == NULL) { - return NULL; + if (klass == nullptr) { + return nullptr; } return klass->java_mirror(); } @@ -553,13 +553,13 @@ ResolvingSignatureStream::ResolvingSignatureStream(Symbol* signature, : SignatureStream(signature, is_method), _class_loader(class_loader), _protection_domain(protection_domain) { - initialize_load_origin(NULL); + initialize_load_origin(nullptr); } ResolvingSignatureStream::ResolvingSignatureStream(Symbol* signature, Klass* load_origin, bool is_method) : SignatureStream(signature, is_method) { - assert(load_origin != NULL, ""); + assert(load_origin != nullptr, ""); initialize_load_origin(load_origin); } @@ -570,7 +570,7 @@ ResolvingSignatureStream::ResolvingSignatureStream(const Method* method) } void ResolvingSignatureStream::cache_handles() { - assert(_load_origin != NULL, ""); + assert(_load_origin != nullptr, ""); JavaThread* current = JavaThread::current(); _class_loader = Handle(current, _load_origin->class_loader()); _protection_domain = Handle(current, _load_origin->protection_domain()); @@ -600,7 +600,7 @@ bool SignatureVerifier::is_valid_method_signature(Symbol* sig) { const char* method_sig = (const char*)sig->bytes(); ssize_t len = sig->utf8_length(); ssize_t index = 0; - if (method_sig != NULL && len > 1 && method_sig[index] == JVM_SIGNATURE_FUNC) { + if (method_sig != nullptr && len > 1 && method_sig[index] == JVM_SIGNATURE_FUNC) { ++index; while (index < len && method_sig[index] != JVM_SIGNATURE_ENDFUNC) { ssize_t res = is_valid_type(&method_sig[index], len - index); @@ -622,7 +622,7 @@ bool SignatureVerifier::is_valid_method_signature(Symbol* sig) { bool SignatureVerifier::is_valid_type_signature(Symbol* sig) { const char*
type_sig = (const char*)sig->bytes(); ssize_t len = sig->utf8_length(); - return (type_sig != NULL && len >= 1 && + return (type_sig != nullptr && len >= 1 && (is_valid_type(type_sig, len) == len)); } diff --git a/src/hotspot/share/runtime/signature.hpp b/src/hotspot/share/runtime/signature.hpp index f56bfd977be41..3f7d062ab6654 100644 --- a/src/hotspot/share/runtime/signature.hpp +++ b/src/hotspot/share/runtime/signature.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -359,7 +359,7 @@ class Fingerprinter: public SignatureIterator { } Fingerprinter(Symbol* signature, bool is_static) : SignatureIterator(signature), - _method(NULL) { + _method(nullptr) { compute_fingerprint_and_return_type(is_static); } }; @@ -575,7 +575,7 @@ class ResolvingSignatureStream : public SignatureStream { void initialize_load_origin(Klass* load_origin) { _load_origin = load_origin; - _handles_cached = (load_origin == NULL); + _handles_cached = (load_origin == nullptr); } void need_handles() { if (!_handles_cached) { diff --git a/src/hotspot/share/runtime/stackChunkFrameStream.hpp b/src/hotspot/share/runtime/stackChunkFrameStream.hpp index e6f2bef50b4b5..6937feb151753 100644 --- a/src/hotspot/share/runtime/stackChunkFrameStream.hpp +++ b/src/hotspot/share/runtime/stackChunkFrameStream.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -75,7 +75,7 @@ class StackChunkFrameStream : public StackObj { inline bool is_stub() const; inline bool is_compiled() const; CodeBlob* cb() const { return _cb; } - const ImmutableOopMap* oopmap() const { if (_oopmap == NULL) get_oopmap(); return _oopmap; } + const ImmutableOopMap* oopmap() const { if (_oopmap == nullptr) get_oopmap(); return _oopmap; } inline int frame_size() const; inline int stack_argsize() const; inline int num_oops() const; diff --git a/src/hotspot/share/runtime/stackValue.cpp b/src/hotspot/share/runtime/stackValue.cpp index 50ae8248ed904..9934cb060f67a 100644 --- a/src/hotspot/share/runtime/stackValue.cpp +++ b/src/hotspot/share/runtime/stackValue.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -58,7 +58,7 @@ static oop oop_from_oop_location(stackChunkOop chunk, void* addr) { // When compressed oops is enabled, an oop location may // contain narrow oop values - we deal with that here - if (chunk != NULL && chunk->has_bitmap()) { + if (chunk != nullptr && chunk->has_bitmap()) { // Transformed stack chunk with narrow oops return chunk->load_oop((narrowOop*)addr); } @@ -67,14 +67,14 @@ static oop oop_from_oop_location(stackChunkOop chunk, void* addr) { if (CompressedOops::is_base(*(void**)addr)) { // Compiled code may produce decoded oop = narrow_oop_base // when a narrow oop implicit null check is used. 
- // The narrow_oop_base could be NULL or be the address - // of the page below heap. Use NULL value for both cases. + // The narrow_oop_base could be null or be the address + // of the page below heap. Use null value for both cases. return nullptr; } #endif } - if (chunk != NULL) { + if (chunk != nullptr) { // Load oop from chunk return chunk->load_oop((oop*)addr); } @@ -99,7 +99,7 @@ static oop oop_from_narrowOop_location(stackChunkOop chunk, void* addr, bool is_ narrow_addr = (narrowOop*)addr; } - if (chunk != NULL) { + if (chunk != nullptr) { // Load oop from chunk return chunk->load_oop(narrow_addr); } @@ -111,7 +111,7 @@ StackValue* StackValue::create_stack_value_from_oop_location(stackChunkOop chunk, void* addr) { oop val = oop_from_oop_location(chunk, addr); assert(oopDesc::is_oop_or_null(val), "bad oop found at " INTPTR_FORMAT " in_cont: %d compressed: %d", - p2i(addr), chunk != NULL, chunk != NULL && chunk->has_bitmap() && UseCompressedOops); + p2i(addr), chunk != nullptr, chunk != nullptr && chunk->has_bitmap() && UseCompressedOops); Handle h(Thread::current(), val); // Wrap a handle around the oop return new StackValue(h); } @@ -119,7 +119,7 @@ StackValue* StackValue::create_stack_value_from_narrowOop_location(stackChunkOop chunk, void* addr, bool is_register) { oop val = oop_from_narrowOop_location(chunk, addr, is_register); assert(oopDesc::is_oop_or_null(val), "bad oop found at " INTPTR_FORMAT " in_cont: %d compressed: %d", - p2i(addr), chunk != NULL, chunk != NULL && chunk->has_bitmap() && UseCompressedOops); + p2i(addr), chunk != nullptr, chunk != nullptr && chunk->has_bitmap() && UseCompressedOops); Handle h(Thread::current(), val); // Wrap a handle around the oop return new StackValue(h); } @@ -241,11 +241,11 @@ template address StackValue::stack_value_address(const frame* fr, const SmallReg template<typename RegisterMapT> address StackValue::stack_value_address(const frame* fr, const RegisterMapT* reg_map, ScopeValue* sv) { if (!sv->is_location()) { - return NULL; + return nullptr; } Location loc = ((LocationValue *)sv)->location(); if (loc.type() == Location::invalid) { - return NULL; + return nullptr; } if (!reg_map->in_cont()) { @@ -256,7 +256,7 @@ address StackValue::stack_value_address(const frame* fr, const RegisterMapT* reg // before any extension by its callee (due to Compiler1 linkage on SPARC), must be used. : ((address)fr->unextended_sp()) + loc.stack_offset(); - assert(value_addr == NULL || reg_map->thread() == NULL || reg_map->thread()->is_in_usable_stack(value_addr), INTPTR_FORMAT, p2i(value_addr)); + assert(value_addr == nullptr || reg_map->thread() == nullptr || reg_map->thread()->is_in_usable_stack(value_addr), INTPTR_FORMAT, p2i(value_addr)); return value_addr; } @@ -264,7 +264,7 @@ address StackValue::stack_value_address(const frame* fr, const RegisterMapT* reg ?
reg_map->as_RegisterMap()->stack_chunk()->reg_to_location(*fr, reg_map->as_RegisterMap(), VMRegImpl::as_VMReg(loc.register_number())) : reg_map->as_RegisterMap()->stack_chunk()->usp_offset_to_location(*fr, loc.stack_offset()); - assert(value_addr == NULL || Continuation::is_in_usable_stack(value_addr, reg_map->as_RegisterMap()) || (reg_map->thread() != NULL && reg_map->thread()->is_in_usable_stack(value_addr)), INTPTR_FORMAT, p2i(value_addr)); + assert(value_addr == nullptr || Continuation::is_in_usable_stack(value_addr, reg_map->as_RegisterMap()) || (reg_map->thread() != nullptr && reg_map->thread()->is_in_usable_stack(value_addr)), INTPTR_FORMAT, p2i(value_addr)); return value_addr; } @@ -292,10 +292,10 @@ void StackValue::print_on(outputStream* st) const { break; case T_OBJECT: - if (_handle_value() != NULL) { + if (_handle_value() != nullptr) { _handle_value()->print_value_on(st); } else { - st->print("NULL"); + st->print("null"); } st->print(" <" INTPTR_FORMAT ">", p2i(_handle_value())); break; diff --git a/src/hotspot/share/runtime/stackWatermark.cpp b/src/hotspot/share/runtime/stackWatermark.cpp index e1d8244f28465..717c3b535c511 100644 --- a/src/hotspot/share/runtime/stackWatermark.cpp +++ b/src/hotspot/share/runtime/stackWatermark.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -161,9 +161,9 @@ void StackWatermarkFramesIterator::next() { StackWatermark::StackWatermark(JavaThread* jt, StackWatermarkKind kind, uint32_t epoch) : _state(StackWatermarkState::create(epoch, true /* is_done */)), _watermark(0), - _next(NULL), + _next(nullptr), _jt(jt), - _iterator(NULL), + _iterator(nullptr), _lock(Mutex::stackwatermark, "StackWatermark_lock"), _kind(kind), _linked_watermarks() { @@ -216,7 +216,7 @@ void StackWatermark::start_processing_impl(void* context) { _iterator->process_one(context); _iterator->process_one(context); } else { - _iterator = NULL; + _iterator = nullptr; } update_watermark(); } @@ -228,7 +228,7 @@ void StackWatermark::yield_processing() { void StackWatermark::update_watermark() { assert(_lock.owned_by_self(), "invariant"); - if (_iterator != NULL && _iterator->has_next()) { + if (_iterator != nullptr && _iterator->has_next()) { assert(_iterator->callee() != 0, "sanity"); Atomic::release_store(&_watermark, _iterator->callee()); Atomic::release_store(&_state, StackWatermarkState::create(epoch_id(), false /* is_done */)); // release watermark w.r.t. 
epoch @@ -243,9 +243,9 @@ void StackWatermark::update_watermark() { void StackWatermark::process_one() { MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag); if (!processing_started()) { - start_processing_impl(NULL /* context */); + start_processing_impl(nullptr /* context */); } else if (!processing_completed()) { - _iterator->process_one(NULL /* context */); + _iterator->process_one(nullptr /* context */); update_watermark(); } } @@ -299,7 +299,7 @@ void StackWatermark::process_linked_watermarks() { // Finish processing all linked stack watermarks for (StackWatermark* watermark : _linked_watermarks) { - watermark->finish_processing(NULL /* context */); + watermark->finish_processing(nullptr /* context */); } } @@ -316,7 +316,7 @@ void StackWatermark::start_processing() { if (!processing_started_acquire()) { MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag); if (!processing_started()) { - start_processing_impl(NULL /* context */); + start_processing_impl(nullptr /* context */); } } } diff --git a/src/hotspot/share/runtime/stackWatermarkSet.cpp b/src/hotspot/share/runtime/stackWatermarkSet.cpp index fb5e2a36b9155..a49b1bfc30794 100644 --- a/src/hotspot/share/runtime/stackWatermarkSet.cpp +++ b/src/hotspot/share/runtime/stackWatermarkSet.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,11 +37,11 @@ #include "utilities/vmError.hpp" StackWatermarks::StackWatermarks() : - _head(NULL) {} + _head(nullptr) {} StackWatermarks::~StackWatermarks() { StackWatermark* current = _head; - while (current != NULL) { + while (current != nullptr) { StackWatermark* next = current->next(); delete current; current = next; @@ -81,7 +81,7 @@ static void verify_processing_context() { void StackWatermarkSet::before_unwind(JavaThread* jt) { verify_processing_context(); assert(jt->has_last_Java_frame(), "must have a Java frame"); - for (StackWatermark* current = head(jt); current != NULL; current = current->next()) { + for (StackWatermark* current = head(jt); current != nullptr; current = current->next()) { current->before_unwind(); } SafepointMechanism::update_poll_values(jt); @@ -90,7 +90,7 @@ void StackWatermarkSet::before_unwind(JavaThread* jt) { void StackWatermarkSet::after_unwind(JavaThread* jt) { verify_processing_context(); assert(jt->has_last_Java_frame(), "must have a Java frame"); - for (StackWatermark* current = head(jt); current != NULL; current = current->next()) { + for (StackWatermark* current = head(jt); current != nullptr; current = current->next()) { current->after_unwind(); } SafepointMechanism::update_poll_values(jt); @@ -102,7 +102,7 @@ void StackWatermarkSet::on_iteration(JavaThread* jt, const frame& fr) { return; } verify_processing_context(); - for (StackWatermark* current = head(jt); current != NULL; current = current->next()) { + for (StackWatermark* current = head(jt); current != nullptr; current = current->next()) { current->on_iteration(fr); } // We don't call SafepointMechanism::update_poll_values here, because the thread @@ -111,7 +111,7 @@ void StackWatermarkSet::on_iteration(JavaThread* jt, const frame& fr) { void StackWatermarkSet::on_safepoint(JavaThread* jt) { StackWatermark* watermark = get(jt, StackWatermarkKind::gc); - if (watermark != NULL) { + if (watermark != nullptr) { 
watermark->on_safepoint(); } } @@ -120,7 +120,7 @@ void StackWatermarkSet::start_processing(JavaThread* jt, StackWatermarkKind kind verify_processing_context(); assert(!jt->is_terminated(), "Poll after termination is a bug"); StackWatermark* watermark = get(jt, kind); - if (watermark != NULL) { + if (watermark != nullptr) { watermark->start_processing(); } // We don't call SafepointMechanism::update_poll_values here, because the thread @@ -129,7 +129,7 @@ } bool StackWatermarkSet::processing_started(JavaThread* jt) { - for (StackWatermark* current = head(jt); current != NULL; current = current->next()) { + for (StackWatermark* current = head(jt); current != nullptr; current = current->next()) { if (!current->processing_started()) { return false; } @@ -139,7 +139,7 @@ void StackWatermarkSet::finish_processing(JavaThread* jt, void* context, StackWatermarkKind kind) { StackWatermark* watermark = get(jt, kind); - if (watermark != NULL) { + if (watermark != nullptr) { watermark->finish_processing(context); } // We don't call SafepointMechanism::update_poll_values here, because the thread @@ -149,7 +149,7 @@ uintptr_t StackWatermarkSet::lowest_watermark(JavaThread* jt) { uintptr_t max_watermark = uintptr_t(0) - 1; uintptr_t watermark = max_watermark; - for (StackWatermark* current = head(jt); current != NULL; current = current->next()) { + for (StackWatermark* current = head(jt); current != nullptr; current = current->next()) { watermark = MIN2(watermark, current->watermark()); } if (watermark == max_watermark) { diff --git a/src/hotspot/share/runtime/stackWatermarkSet.inline.hpp b/src/hotspot/share/runtime/stackWatermarkSet.inline.hpp index b0bd681b590d2..1b2280fed42e2 100644 --- a/src/hotspot/share/runtime/stackWatermarkSet.inline.hpp +++ b/src/hotspot/share/runtime/stackWatermarkSet.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,12 +30,12 @@ #include "runtime/stackWatermark.hpp" inline StackWatermark* StackWatermarkSet::get(JavaThread* jt, StackWatermarkKind kind) { - for (StackWatermark* stack_watermark = head(jt); stack_watermark != NULL; stack_watermark = stack_watermark->next()) { + for (StackWatermark* stack_watermark = head(jt); stack_watermark != nullptr; stack_watermark = stack_watermark->next()) { if (stack_watermark->kind() == kind) { return stack_watermark; } } - return NULL; + return nullptr; } template <typename T> @@ -44,7 +44,7 @@ inline T* StackWatermarkSet::get(JavaThread* jt, StackWatermarkKind kind) { } inline bool StackWatermarkSet::has_watermark(JavaThread* jt, StackWatermarkKind kind) { - return get(jt, kind) != NULL; + return get(jt, kind) != nullptr; } #endif // SHARE_RUNTIME_STACKWATERMARKSET_INLINE_HPP diff --git a/src/hotspot/share/runtime/statSampler.cpp b/src/hotspot/share/runtime/statSampler.cpp index f4889472c4671..5d2e102ada4d2 100644 --- a/src/hotspot/share/runtime/statSampler.cpp +++ b/src/hotspot/share/runtime/statSampler.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -50,8 +50,8 @@ class StatSamplerTask : public PeriodicTask { //---------------------------------------------------------- // Implementation of StatSampler -StatSamplerTask* StatSampler::_task = NULL; -PerfDataList* StatSampler::_sampled = NULL; +StatSamplerTask* StatSampler::_task = nullptr; +PerfDataList* StatSampler::_sampled = nullptr; /* * the initialize method is called from the engage() method @@ -107,7 +107,7 @@ void StatSampler::disengage() { // remove StatSamplerTask _task->disenroll(); delete _task; - _task = NULL; + _task = nullptr; // force a final sample sample_data(_sampled); @@ -123,9 +123,9 @@ void StatSampler::destroy() { if (!UsePerfData) return; - if (_sampled != NULL) { + if (_sampled != nullptr) { delete(_sampled); - _sampled = NULL; + _sampled = nullptr; } } @@ -135,7 +135,7 @@ void StatSampler::destroy() { */ void StatSampler::sample_data(PerfDataList* list) { - assert(list != NULL, "null list unexpected"); + assert(list != nullptr, "null list unexpected"); for (int index = 0; index < list->length(); index++) { PerfData* item = list->at(index); @@ -161,14 +161,14 @@ void StatSampler::collect_sample() { // // if (PerfDataManager::count() > previous) { // // get a new copy of the sampled list - // if (_sampled != NULL) { + // if (_sampled != nullptr) { // delete(_sampled); - // _sampled = NULL; + // _sampled = nullptr; // } // _sampled = PerfDataManager::sampled(); // } - assert(_sampled != NULL, "list not initialized"); + assert(_sampled != nullptr, "list not initialized"); sample_data(_sampled); } @@ -196,7 +196,7 @@ void StatSampler::assert_system_property(const char* name, const char* value, TR CHECK); oop value_oop = result.get_oop(); - assert(value_oop != NULL, "property must have a value"); + assert(value_oop != nullptr, "property must have a value"); // convert Java String to utf8 string char* system_value = java_lang_String::as_utf8_string(value_oop); @@ -211,9 +211,9 @@ void StatSampler::assert_system_property(const char* name, const char* value, TR */ void StatSampler::add_property_constant(CounterNS name_space, const char* name, const char* value, TRAPS) { // the property must exist - assert(value != NULL, "property name should be have a value: %s", name); + assert(value != nullptr, "property name should be have a value: %s", name); assert_system_property(name, value, CHECK); - if (value != NULL) { + if (value != nullptr) { // create the property counter PerfDataManager::create_string_constant(name_space, name, value, CHECK); } diff --git a/src/hotspot/share/runtime/statSampler.hpp b/src/hotspot/share/runtime/statSampler.hpp index 4590f85302cf7..a26f9743e0c61 100644 --- a/src/hotspot/share/runtime/statSampler.hpp +++ b/src/hotspot/share/runtime/statSampler.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -61,7 +61,7 @@ class StatSampler : AllStatic { static void engage(); static void disengage(); - static bool is_active() { return _task != NULL; } + static bool is_active() { return _task != nullptr; } static void initialize(); static void destroy(); diff --git a/src/hotspot/share/runtime/stubCodeGenerator.cpp b/src/hotspot/share/runtime/stubCodeGenerator.cpp index 6ec4ee31e9532..177bc66853e59 100644 --- a/src/hotspot/share/runtime/stubCodeGenerator.cpp +++ b/src/hotspot/share/runtime/stubCodeGenerator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,12 +35,12 @@ // Implementation of StubCodeDesc -StubCodeDesc* StubCodeDesc::_list = NULL; +StubCodeDesc* StubCodeDesc::_list = nullptr; bool StubCodeDesc::_frozen = false; StubCodeDesc* StubCodeDesc::desc_for(address pc) { StubCodeDesc* p = _list; - while (p != NULL && !p->contains(pc)) { + while (p != nullptr && !p->contains(pc)) { p = p->_next; } return p; } @@ -71,7 +71,7 @@ StubCodeGenerator::~StubCodeGenerator() { #ifndef PRODUCT CodeBuffer* cbuf = _masm->code(); CodeBlob* blob = CodeCache::find_blob(cbuf->insts()->start()); - if (blob != NULL) { + if (blob != nullptr) { blob->use_remarks(cbuf->asm_remarks()); blob->use_strings(cbuf->dbg_strings()); } diff --git a/src/hotspot/share/runtime/stubCodeGenerator.hpp b/src/hotspot/share/runtime/stubCodeGenerator.hpp index 7bc454ab1907c..97b2facaac0a2 100644 --- a/src/hotspot/share/runtime/stubCodeGenerator.hpp +++ b/src/hotspot/share/runtime/stubCodeGenerator.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -53,7 +53,7 @@ class StubCodeDesc: public CHeapObj<mtCode> { void set_begin(address begin) { assert(begin >= _begin, "begin may not decrease"); - assert(_end == NULL || begin <= _end, "begin & end not properly ordered"); + assert(_end == nullptr || begin <= _end, "begin & end not properly ordered"); _begin = begin; } @@ -68,11 +68,11 @@ class StubCodeDesc: public CHeapObj<mtCode> { static StubCodeDesc* first() { return _list; } static StubCodeDesc* next(StubCodeDesc* desc) { return desc->_next; } - static StubCodeDesc* desc_for(address pc); // returns the code descriptor for the code containing pc or NULL + static StubCodeDesc* desc_for(address pc); // returns the code descriptor for the code containing pc or null - StubCodeDesc(const char* group, const char* name, address begin, address end = NULL) { + StubCodeDesc(const char* group, const char* name, address begin, address end = nullptr) { assert(!_frozen, "no modifications allowed"); - assert(name != NULL, "no name specified"); + assert(name != nullptr, "no name specified"); _next = _list; _group = group; _name = name; diff --git a/src/hotspot/share/runtime/stubRoutines.cpp b/src/hotspot/share/runtime/stubRoutines.cpp index 872cf35887927..b582260a84497 100644 --- a/src/hotspot/share/runtime/stubRoutines.cpp +++ b/src/hotspot/share/runtime/stubRoutines.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates.
All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,37 +40,37 @@ #include "opto/runtime.hpp" #endif -UnsafeCopyMemory* UnsafeCopyMemory::_table = NULL; +UnsafeCopyMemory* UnsafeCopyMemory::_table = nullptr; int UnsafeCopyMemory::_table_length = 0; int UnsafeCopyMemory::_table_max_length = 0; -address UnsafeCopyMemory::_common_exit_stub_pc = NULL; +address UnsafeCopyMemory::_common_exit_stub_pc = nullptr; // Implementation of StubRoutines - for a description // of how to extend it, see the header file. // Class Variables -BufferBlob* StubRoutines::_code1 = NULL; -BufferBlob* StubRoutines::_code2 = NULL; -BufferBlob* StubRoutines::_code3 = NULL; +BufferBlob* StubRoutines::_code1 = nullptr; +BufferBlob* StubRoutines::_code2 = nullptr; +BufferBlob* StubRoutines::_code3 = nullptr; -address StubRoutines::_call_stub_return_address = NULL; -address StubRoutines::_call_stub_entry = NULL; +address StubRoutines::_call_stub_return_address = nullptr; +address StubRoutines::_call_stub_entry = nullptr; -address StubRoutines::_catch_exception_entry = NULL; -address StubRoutines::_forward_exception_entry = NULL; -address StubRoutines::_throw_AbstractMethodError_entry = NULL; -address StubRoutines::_throw_IncompatibleClassChangeError_entry = NULL; -address StubRoutines::_throw_NullPointerException_at_call_entry = NULL; -address StubRoutines::_throw_StackOverflowError_entry = NULL; -address StubRoutines::_throw_delayed_StackOverflowError_entry = NULL; +address StubRoutines::_catch_exception_entry = nullptr; +address StubRoutines::_forward_exception_entry = nullptr; +address StubRoutines::_throw_AbstractMethodError_entry = nullptr; +address StubRoutines::_throw_IncompatibleClassChangeError_entry = nullptr; +address StubRoutines::_throw_NullPointerException_at_call_entry = nullptr; +address StubRoutines::_throw_StackOverflowError_entry = nullptr; +address StubRoutines::_throw_delayed_StackOverflowError_entry = nullptr; jint StubRoutines::_verify_oop_count = 0; -address StubRoutines::_verify_oop_subroutine_entry = NULL; -address StubRoutines::_atomic_xchg_entry = NULL; -address StubRoutines::_atomic_cmpxchg_entry = NULL; -address StubRoutines::_atomic_cmpxchg_long_entry = NULL; -address StubRoutines::_atomic_add_entry = NULL; -address StubRoutines::_fence_entry = NULL; +address StubRoutines::_verify_oop_subroutine_entry = nullptr; +address StubRoutines::_atomic_xchg_entry = nullptr; +address StubRoutines::_atomic_cmpxchg_entry = nullptr; +address StubRoutines::_atomic_cmpxchg_long_entry = nullptr; +address StubRoutines::_atomic_add_entry = nullptr; +address StubRoutines::_fence_entry = nullptr; // Compiled code entry points default values // The default functions don't have separate disjoint versions. 
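(For context only, not part of this patch: entries left at their nullptr defaults signal that no platform stub was generated, so consumers test the accessor before dispatching. A minimal sketch, assuming the _dexp entry below, its StubRoutines::dexp() accessor, the CAST_TO_FN_PTR helper, and the SharedRuntime::dexp fallback; the function name fast_exp is hypothetical.

    // Prefer the generated stub when one was installed; otherwise use
    // the portable shared-runtime implementation.
    double fast_exp(double x) {
      address entry = StubRoutines::dexp();
      if (entry != nullptr) {
        return CAST_TO_FN_PTR(double (*)(double), entry)(x);  // generated stub
      }
      return SharedRuntime::dexp(x);  // portable fallback
    }
)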
@@ -100,13 +100,13 @@ address StubRoutines::_arrayof_jlong_disjoint_arraycopy = CAST_FROM_FN_PTR(addr address StubRoutines::_arrayof_oop_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy); address StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy_uninit); -address StubRoutines::_data_cache_writeback = NULL; -address StubRoutines::_data_cache_writeback_sync = NULL; +address StubRoutines::_data_cache_writeback = nullptr; +address StubRoutines::_data_cache_writeback_sync = nullptr; -address StubRoutines::_checkcast_arraycopy = NULL; -address StubRoutines::_checkcast_arraycopy_uninit = NULL; -address StubRoutines::_unsafe_arraycopy = NULL; -address StubRoutines::_generic_arraycopy = NULL; +address StubRoutines::_checkcast_arraycopy = nullptr; +address StubRoutines::_checkcast_arraycopy_uninit = nullptr; +address StubRoutines::_unsafe_arraycopy = nullptr; +address StubRoutines::_generic_arraycopy = nullptr; address StubRoutines::_jbyte_fill; address StubRoutines::_jshort_fill; @@ -115,68 +115,68 @@ address StubRoutines::_arrayof_jbyte_fill; address StubRoutines::_arrayof_jshort_fill; address StubRoutines::_arrayof_jint_fill; -address StubRoutines::_aescrypt_encryptBlock = NULL; -address StubRoutines::_aescrypt_decryptBlock = NULL; -address StubRoutines::_cipherBlockChaining_encryptAESCrypt = NULL; -address StubRoutines::_cipherBlockChaining_decryptAESCrypt = NULL; -address StubRoutines::_electronicCodeBook_encryptAESCrypt = NULL; -address StubRoutines::_electronicCodeBook_decryptAESCrypt = NULL; -address StubRoutines::_counterMode_AESCrypt = NULL; -address StubRoutines::_galoisCounterMode_AESCrypt = NULL; -address StubRoutines::_ghash_processBlocks = NULL; -address StubRoutines::_chacha20Block = NULL; -address StubRoutines::_base64_encodeBlock = NULL; -address StubRoutines::_base64_decodeBlock = NULL; -address StubRoutines::_poly1305_processBlocks = NULL; - -address StubRoutines::_md5_implCompress = NULL; -address StubRoutines::_md5_implCompressMB = NULL; -address StubRoutines::_sha1_implCompress = NULL; -address StubRoutines::_sha1_implCompressMB = NULL; -address StubRoutines::_sha256_implCompress = NULL; -address StubRoutines::_sha256_implCompressMB = NULL; -address StubRoutines::_sha512_implCompress = NULL; -address StubRoutines::_sha512_implCompressMB = NULL; -address StubRoutines::_sha3_implCompress = NULL; -address StubRoutines::_sha3_implCompressMB = NULL; - -address StubRoutines::_updateBytesCRC32 = NULL; -address StubRoutines::_crc_table_adr = NULL; - -address StubRoutines::_crc32c_table_addr = NULL; -address StubRoutines::_updateBytesCRC32C = NULL; -address StubRoutines::_updateBytesAdler32 = NULL; - -address StubRoutines::_multiplyToLen = NULL; -address StubRoutines::_squareToLen = NULL; -address StubRoutines::_mulAdd = NULL; -address StubRoutines::_montgomeryMultiply = NULL; -address StubRoutines::_montgomerySquare = NULL; -address StubRoutines::_bigIntegerRightShiftWorker = NULL; -address StubRoutines::_bigIntegerLeftShiftWorker = NULL; - -address StubRoutines::_vectorizedMismatch = NULL; - -address StubRoutines::_dexp = NULL; -address StubRoutines::_dlog = NULL; -address StubRoutines::_dlog10 = NULL; -address StubRoutines::_dpow = NULL; -address StubRoutines::_dsin = NULL; -address StubRoutines::_dcos = NULL; -address StubRoutines::_dlibm_sin_cos_huge = NULL; -address StubRoutines::_dlibm_reduce_pi04l = NULL; -address StubRoutines::_dlibm_tan_cot_huge = NULL; -address StubRoutines::_dtan = 
NULL; - -address StubRoutines::_vector_f_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_SVML_OP] = {{NULL}, {NULL}}; -address StubRoutines::_vector_d_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_SVML_OP] = {{NULL}, {NULL}}; - -address StubRoutines::_cont_thaw = NULL; -address StubRoutines::_cont_returnBarrier = NULL; -address StubRoutines::_cont_returnBarrierExc = NULL; - -JFR_ONLY(RuntimeStub* StubRoutines::_jfr_write_checkpoint_stub = NULL;) -JFR_ONLY(address StubRoutines::_jfr_write_checkpoint = NULL;) +address StubRoutines::_aescrypt_encryptBlock = nullptr; +address StubRoutines::_aescrypt_decryptBlock = nullptr; +address StubRoutines::_cipherBlockChaining_encryptAESCrypt = nullptr; +address StubRoutines::_cipherBlockChaining_decryptAESCrypt = nullptr; +address StubRoutines::_electronicCodeBook_encryptAESCrypt = nullptr; +address StubRoutines::_electronicCodeBook_decryptAESCrypt = nullptr; +address StubRoutines::_counterMode_AESCrypt = nullptr; +address StubRoutines::_galoisCounterMode_AESCrypt = nullptr; +address StubRoutines::_ghash_processBlocks = nullptr; +address StubRoutines::_chacha20Block = nullptr; +address StubRoutines::_base64_encodeBlock = nullptr; +address StubRoutines::_base64_decodeBlock = nullptr; +address StubRoutines::_poly1305_processBlocks = nullptr; + +address StubRoutines::_md5_implCompress = nullptr; +address StubRoutines::_md5_implCompressMB = nullptr; +address StubRoutines::_sha1_implCompress = nullptr; +address StubRoutines::_sha1_implCompressMB = nullptr; +address StubRoutines::_sha256_implCompress = nullptr; +address StubRoutines::_sha256_implCompressMB = nullptr; +address StubRoutines::_sha512_implCompress = nullptr; +address StubRoutines::_sha512_implCompressMB = nullptr; +address StubRoutines::_sha3_implCompress = nullptr; +address StubRoutines::_sha3_implCompressMB = nullptr; + +address StubRoutines::_updateBytesCRC32 = nullptr; +address StubRoutines::_crc_table_adr = nullptr; + +address StubRoutines::_crc32c_table_addr = nullptr; +address StubRoutines::_updateBytesCRC32C = nullptr; +address StubRoutines::_updateBytesAdler32 = nullptr; + +address StubRoutines::_multiplyToLen = nullptr; +address StubRoutines::_squareToLen = nullptr; +address StubRoutines::_mulAdd = nullptr; +address StubRoutines::_montgomeryMultiply = nullptr; +address StubRoutines::_montgomerySquare = nullptr; +address StubRoutines::_bigIntegerRightShiftWorker = nullptr; +address StubRoutines::_bigIntegerLeftShiftWorker = nullptr; + +address StubRoutines::_vectorizedMismatch = nullptr; + +address StubRoutines::_dexp = nullptr; +address StubRoutines::_dlog = nullptr; +address StubRoutines::_dlog10 = nullptr; +address StubRoutines::_dpow = nullptr; +address StubRoutines::_dsin = nullptr; +address StubRoutines::_dcos = nullptr; +address StubRoutines::_dlibm_sin_cos_huge = nullptr; +address StubRoutines::_dlibm_reduce_pi04l = nullptr; +address StubRoutines::_dlibm_tan_cot_huge = nullptr; +address StubRoutines::_dtan = nullptr; + +address StubRoutines::_vector_f_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_SVML_OP] = {{nullptr}, {nullptr}}; +address StubRoutines::_vector_d_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_SVML_OP] = {{nullptr}, {nullptr}}; + +address StubRoutines::_cont_thaw = nullptr; +address StubRoutines::_cont_returnBarrier = nullptr; +address StubRoutines::_cont_returnBarrierExc = nullptr; + +JFR_ONLY(RuntimeStub* StubRoutines::_jfr_write_checkpoint_stub = nullptr;) +JFR_ONLY(address StubRoutines::_jfr_write_checkpoint = nullptr;) // 
Initialization // @@ -208,18 +208,18 @@ address UnsafeCopyMemory::page_error_continue_pc(address pc) { return entry->error_exit_pc(); } } - return NULL; + return nullptr; } void StubRoutines::initialize1() { - if (_code1 == NULL) { + if (_code1 == nullptr) { ResourceMark rm; TraceTime timer("StubRoutines generation 1", TRACETIME_LOG(Info, startuptime)); // Add extra space for large CodeEntryAlignment int max_aligned_stubs = 10; int size = code_size1 + CodeEntryAlignment * max_aligned_stubs; _code1 = BufferBlob::create("StubRoutines (1)", size); - if (_code1 == NULL) { + if (_code1 == nullptr) { vm_exit_out_of_memory(code_size1, OOM_MALLOC_ERROR, "CodeCache: no room for StubRoutines (1)"); } CodeBuffer buffer(_code1); @@ -268,11 +268,11 @@ static void test_arraycopy_func(address func, int alignment) { #endif // ASSERT void StubRoutines::initializeContinuationStubs() { - if (_code3 == NULL) { + if (_code3 == nullptr) { ResourceMark rm; TraceTime timer("StubRoutines generation 3", TRACETIME_LOG(Info, startuptime)); _code3 = BufferBlob::create("StubRoutines (3)", code_size2); - if (_code3 == NULL) { + if (_code3 == nullptr) { vm_exit_out_of_memory(code_size2, OOM_MALLOC_ERROR, "CodeCache: no room for StubRoutines (3)"); } CodeBuffer buffer(_code3); @@ -284,14 +284,14 @@ void StubRoutines::initializeContinuationStubs() { } void StubRoutines::initialize2() { - if (_code2 == NULL) { + if (_code2 == nullptr) { ResourceMark rm; TraceTime timer("StubRoutines generation 2", TRACETIME_LOG(Info, startuptime)); // Add extra space for large CodeEntryAlignment int max_aligned_stubs = 100; int size = code_size2 + CodeEntryAlignment * max_aligned_stubs; _code2 = BufferBlob::create("StubRoutines (2)", size); - if (_code2 == NULL) { + if (_code2 == nullptr) { vm_exit_out_of_memory(code_size2, OOM_MALLOC_ERROR, "CodeCache: no room for StubRoutines (2)"); } CodeBuffer buffer(_code2); @@ -320,7 +320,7 @@ void StubRoutines::initialize2() { #undef TEST_ARRAYCOPY #define TEST_FILL(type) \ - if (_##type##_fill != NULL) { \ + if (_##type##_fill != nullptr) { \ union { \ double d; \ type body[96]; \ @@ -507,11 +507,11 @@ address StubRoutines::select_fill_function(BasicType t, bool aligned, const char case T_ADDRESS: case T_VOID: // Currently unsupported - return NULL; + return nullptr; default: ShouldNotReachHere(); - return NULL; + return nullptr; } #undef RETURN_STUB @@ -584,7 +584,7 @@ StubRoutines::select_arraycopy_function(BasicType t, bool aligned, bool disjoint } default: ShouldNotReachHere(); - return NULL; + return nullptr; } #undef RETURN_STUB @@ -593,21 +593,21 @@ StubRoutines::select_arraycopy_function(BasicType t, bool aligned, bool disjoint UnsafeCopyMemoryMark::UnsafeCopyMemoryMark(StubCodeGenerator* cgen, bool add_entry, bool continue_at_scope_end, address error_exit_pc) { _cgen = cgen; - _ucm_entry = NULL; + _ucm_entry = nullptr; if (add_entry) { - address err_exit_pc = NULL; + address err_exit_pc = nullptr; if (!continue_at_scope_end) { - err_exit_pc = error_exit_pc != NULL ? error_exit_pc : UnsafeCopyMemory::common_exit_stub_pc(); + err_exit_pc = error_exit_pc != nullptr ? 
error_exit_pc : UnsafeCopyMemory::common_exit_stub_pc(); } - assert(err_exit_pc != NULL || continue_at_scope_end, "error exit not set"); - _ucm_entry = UnsafeCopyMemory::add_to_table(_cgen->assembler()->pc(), NULL, err_exit_pc); + assert(err_exit_pc != nullptr || continue_at_scope_end, "error exit not set"); + _ucm_entry = UnsafeCopyMemory::add_to_table(_cgen->assembler()->pc(), nullptr, err_exit_pc); } } UnsafeCopyMemoryMark::~UnsafeCopyMemoryMark() { - if (_ucm_entry != NULL) { + if (_ucm_entry != nullptr) { _ucm_entry->set_end_pc(_cgen->assembler()->pc()); - if (_ucm_entry->error_exit_pc() == NULL) { + if (_ucm_entry->error_exit_pc() == nullptr) { _ucm_entry->set_error_exit_pc(_cgen->assembler()->pc()); } } diff --git a/src/hotspot/share/runtime/stubRoutines.hpp b/src/hotspot/share/runtime/stubRoutines.hpp index 80e77beb8aa31..428f6641e9e36 100644 --- a/src/hotspot/share/runtime/stubRoutines.hpp +++ b/src/hotspot/share/runtime/stubRoutines.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -85,7 +85,7 @@ class UnsafeCopyMemory : public CHeapObj<mtCode> { static UnsafeCopyMemory* _table; static int _table_length; static int _table_max_length; - UnsafeCopyMemory() : _start_pc(NULL), _end_pc(NULL), _error_exit_pc(NULL) {} + UnsafeCopyMemory() : _start_pc(nullptr), _end_pc(nullptr), _error_exit_pc(nullptr) {} void set_start_pc(address pc) { _start_pc = pc; } void set_end_pc(address pc) { _end_pc = pc; } void set_error_exit_pc(address pc) { _error_exit_pc = pc; } @@ -117,7 +117,7 @@ class UnsafeCopyMemoryMark : public StackObj { UnsafeCopyMemory* _ucm_entry; StubCodeGenerator* _cgen; public: - UnsafeCopyMemoryMark(StubCodeGenerator* cgen, bool add_entry, bool continue_at_scope_end, address error_exit_pc = NULL); + UnsafeCopyMemoryMark(StubCodeGenerator* cgen, bool add_entry, bool continue_at_scope_end, address error_exit_pc = nullptr); ~UnsafeCopyMemoryMark(); }; @@ -270,8 +270,8 @@ class StubRoutines: AllStatic { static bool contains(address addr) { return - (_code1 != NULL && _code1->blob_contains(addr)) || - (_code2 != NULL && _code2->blob_contains(addr)) ; + (_code1 != nullptr && _code1->blob_contains(addr)) || + (_code2 != nullptr && _code2->blob_contains(addr)) ; } static RuntimeBlob* code1() { return _code1; } diff --git a/src/hotspot/share/runtime/synchronizer.cpp b/src/hotspot/share/runtime/synchronizer.cpp index 36302a8390f81..d34a3ccab1ad8 100644 --- a/src/hotspot/share/runtime/synchronizer.cpp +++ b/src/hotspot/share/runtime/synchronizer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -126,11 +126,11 @@ size_t MonitorList::unlink_deflated(Thread* current, LogStream* ls, elapsedTimer* timer_p, GrowableArray<ObjectMonitor*>* unlinked_list) { size_t unlinked_count = 0; - ObjectMonitor* prev = NULL; + ObjectMonitor* prev = nullptr; ObjectMonitor* head = Atomic::load_acquire(&_head); ObjectMonitor* m = head; - // The in-use list head can be NULL during the final audit. - while (m != NULL) { + // The in-use list head can be null during the final audit.
+ while (m != nullptr) { if (m->is_being_async_deflated()) { // Find next live ObjectMonitor. ObjectMonitor* next = m; do { ObjectMonitor* next_next = next->next_om(); unlinked_count++; unlinked_list->append(next); next = next_next; if (unlinked_count >= (size_t)MonitorDeflationMax) { // Reached the max so bail out on the gathering loop. break; } - } while (next != NULL && next->is_being_async_deflated()); - if (prev == NULL) { + } while (next != nullptr && next->is_being_async_deflated()); + if (prev == nullptr) { ObjectMonitor* prev_head = Atomic::cmpxchg(&_head, head, next); if (prev_head != head) { // Find new prev ObjectMonitor that just got inserted. @@ -201,11 +201,11 @@ ObjectMonitor* MonitorList::Iterator::next() { // TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly. #define DTRACE_MONITOR_PROBE_COMMON(obj, thread) \ - char* bytes = NULL; \ + char* bytes = nullptr; \ int len = 0; \ jlong jtid = SharedRuntime::get_java_tid(thread); \ Symbol* klassname = obj->klass()->name(); \ - if (klassname != NULL) { \ + if (klassname != nullptr) { \ bytes = (char*)klassname->bytes(); \ len = klassname->utf8_length(); \ } @@ -308,7 +308,7 @@ static uintx _no_progress_cnt = 0; bool ObjectSynchronizer::quick_notify(oopDesc* obj, JavaThread* current, bool all) { assert(current->thread_state() == _thread_in_Java, "invariant"); NoSafepointVerifier nsv; - if (obj == NULL) return false; // slow-path for invalid obj + if (obj == nullptr) return false; // slow-path for invalid obj const markWord mark = obj->mark(); if (mark.has_locker() && current->is_lock_owned((address)mark.locker())) { @@ -322,7 +322,7 @@ assert(mon->object() == oop(obj), "invariant"); if (mon->owner() != current) return false; // slow-path for IMS exception - if (mon->first_waiter() != NULL) { + if (mon->first_waiter() != nullptr) { // We have one or more waiters. Since this is an inflated monitor // that we own, we can transfer one or more threads from the waitset // to the entrylist here and now, avoiding the slow-path. @@ -335,7 +335,7 @@ do { mon->INotify(current); ++free_count; - } while (mon->first_waiter() != NULL && all); + } while (mon->first_waiter() != nullptr && all); OM_PERFDATA_OP(Notifications, inc(free_count)); } return true; @@ -356,7 +356,7 @@ bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current, BasicLock * lock) { assert(current->thread_state() == _thread_in_Java, "invariant"); NoSafepointVerifier nsv; - if (obj == NULL) return false; // Need to throw NPE + if (obj == nullptr) return false; // Need to throw NPE if (obj->klass()->is_value_based()) { return false; @@ -369,7 +369,7 @@ // An async deflation or GC can race us before we manage to make // the ObjectMonitor busy by setting the owner below. If we detect // that race we just bail out to the slow-path here. - if (m->object_peek() == NULL) { + if (m->object_peek() == nullptr) { return false; } JavaThread* const owner = static_cast<JavaThread*>(m->owner_raw()); @@ -387,7 +387,7 @@ // This Java Monitor is inflated so obj's header will never be // displaced to this thread's BasicLock. Make the displaced header - // non-NULL so this BasicLock is not seen as recursive nor as + // non-null so this BasicLock is not seen as recursive nor as // being locked.
We do this unconditionally so that this thread's // BasicLock cannot be mis-interpreted by any stack walkers. For // performance reasons, stack walkers generally first check for @@ -396,7 +396,7 @@ bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current, // and last are the inflated Java Monitor (ObjectMonitor) checks. lock->set_displaced_header(markWord::unused_mark()); - if (owner == NULL && m->try_set_owner_from(NULL, current) == NULL) { + if (owner == nullptr && m->try_set_owner_from(nullptr, current) == nullptr) { assert(m->_recursions == 0, "invariant"); current->inc_held_monitor_count(); return true; @@ -432,7 +432,7 @@ void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread current->print_active_stack_on(&ss); char* base = (char*)strstr(ss.base(), "at"); char* newline = (char*)strchr(ss.base(), '\n'); - if (newline != NULL) { + if (newline != nullptr) { *newline = '\0'; } fatal("Synchronizing on object " INTPTR_FORMAT " of klass %s %s", p2i(obj()), obj->klass()->external_name(), base); @@ -496,7 +496,7 @@ void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) current->is_lock_owned((address)mark.locker())) { assert(lock != mark.locker(), "must not re-lock the same lock"); assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock"); - lock->set_displaced_header(markWord::from_pointer(NULL)); + lock->set_displaced_header(markWord::from_pointer(nullptr)); return; } @@ -528,7 +528,7 @@ void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) markWord dhw = lock->displaced_header(); if (dhw.value() == 0) { - // If the displaced header is NULL, then this exit matches up with + // If the displaced header is null, then this exit matches up with // a recursive enter. No real work to do here except for diagnostics. #ifndef PRODUCT if (mark != markWord::INFLATING()) { @@ -658,13 +658,13 @@ ObjectLocker::ObjectLocker(Handle obj, JavaThread* thread) { _thread->check_for_valid_safepoint_state(); _obj = obj; - if (_obj() != NULL) { + if (_obj() != nullptr) { ObjectSynchronizer::enter(_obj, &_lock, _thread); } } ObjectLocker::~ObjectLocker() { - if (_obj() != NULL) { + if (_obj() != nullptr) { ObjectSynchronizer::exit(_obj(), &_lock, _thread); } } @@ -859,7 +859,7 @@ static inline intptr_t get_next_hash(Thread* current, oop obj) { intptr_t ObjectSynchronizer::FastHashCode(Thread* current, oop obj) { while (true) { - ObjectMonitor* monitor = NULL; + ObjectMonitor* monitor = nullptr; markWord temp, test; intptr_t hash; markWord mark = read_stable_mark(obj); @@ -993,7 +993,7 @@ bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* current, JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) { oop obj = h_obj(); - address owner = NULL; + address owner = nullptr; markWord mark = read_stable_mark(obj); @@ -1007,12 +1007,12 @@ JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_ob // The first stage of async deflation does not affect any field // used by this comparison so the ObjectMonitor* is usable here. 
ObjectMonitor* monitor = mark.monitor(); - assert(monitor != NULL, "monitor should be non-null"); + assert(monitor != nullptr, "monitor should be non-null"); owner = (address) monitor->owner(); } - if (owner != NULL) { - // owning_thread_from_monitor_owner() may also return NULL here + if (owner != nullptr) { + // owning_thread_from_monitor_owner() may also return null here return Threads::owning_thread_from_monitor_owner(t_list, owner); } @@ -1021,7 +1021,7 @@ JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_ob // locked by another thread when reaching here. // assert(mark.is_neutral(), "sanity check"); - return NULL; + return nullptr; } // Visitors ... @@ -1040,7 +1040,7 @@ void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure, JavaThread* t // is set to a stack lock address in the target thread. continue; } - if (!mid->is_being_async_deflated() && mid->object_peek() != NULL) { + if (!mid->is_being_async_deflated() && mid->object_peek() != nullptr) { // Only process with closure if the object is set. // monitors_iterate() is only called at a safepoint or when the @@ -1065,7 +1065,7 @@ void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure, ObjectMonitor* mid = *iter.next(); // Owner set to a stack lock address in thread should never be seen here: assert(mid->owner() == thread, "must be"); - if (!mid->is_being_async_deflated() && mid->object_peek() != NULL) { + if (!mid->is_being_async_deflated() && mid->object_peek() != nullptr) { // Only process with closure if the object is set. // monitors_iterate() is only called at a safepoint or when the @@ -1181,7 +1181,7 @@ jlong ObjectSynchronizer::time_since_last_async_deflation_ms() { static void post_monitor_inflate_event(EventJavaMonitorInflate* event, const oop obj, ObjectSynchronizer::InflateCause cause) { - assert(event != NULL, "invariant"); + assert(event != nullptr, "invariant"); event->set_monitorClass(obj->klass()); event->set_address((uintptr_t)(void*)obj); event->set_cause((u1)cause); @@ -1298,8 +1298,8 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object, // with this thread we could simply set m->_owner = current. // Note that a thread can inflate an object // that it has stack-locked -- as might happen in wait() -- directly - // with CAS. That is, we can avoid the xchg-NULL .... ST idiom. - m->set_owner_from(NULL, mark.locker()); + // with CAS. That is, we can avoid the xchg-nullptr .... ST idiom. + m->set_owner_from(nullptr, mark.locker()); // TODO-FIXME: assert BasicLock->dhw != 0. // Must preserve store ordering. The monitor state must @@ -1333,7 +1333,7 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object, // pre-locked ObjectMonitor pointer into the object header. A successful // CAS inflates the object *and* confers ownership to the inflating thread. // In the current implementation we use a 2-step mechanism where we CAS() - // to inflate and then CAS() again to try to swing _owner from NULL to current. + // to inflate and then CAS() again to try to swing _owner from null to current. // An inflateTry() method that we could call from enter() would be useful. // Catch if the object's header is not neutral (not locked and @@ -1345,7 +1345,7 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object, if (object->cas_set_mark(markWord::encode(m), mark) != mark) { delete m; - m = NULL; + m = nullptr; continue; // interference - the markword changed - just retry.
// The state-transitions are one-way, so there's no chance of @@ -1380,7 +1380,7 @@ void ObjectSynchronizer::chk_for_block_req(JavaThread* current, const char* op_n } // A safepoint/handshake has started. - if (ls != NULL) { + if (ls != nullptr) { timer_p->stop(); ls->print_cr("pausing %s: %s=" SIZE_FORMAT ", in_use_list stats: ceiling=" SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT, @@ -1393,7 +1393,7 @@ void ObjectSynchronizer::chk_for_block_req(JavaThread* current, const char* op_n ThreadBlockInVM tbivm(current); } - if (ls != NULL) { + if (ls != nullptr) { ls->print_cr("resuming %s: in_use_list stats: ceiling=" SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT, op_name, in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max()); @@ -1471,7 +1471,7 @@ size_t ObjectSynchronizer::deflate_idle_monitors(ObjectMonitorsHashtable* table) LogStreamHandle(Debug, monitorinflation) lsh_debug; LogStreamHandle(Info, monitorinflation) lsh_info; - LogStream* ls = NULL; + LogStream* ls = nullptr; if (log_is_enabled(Debug, monitorinflation)) { ls = &lsh_debug; } else if (log_is_enabled(Info, monitorinflation)) { @@ -1479,7 +1479,7 @@ size_t ObjectSynchronizer::deflate_idle_monitors(ObjectMonitorsHashtable* table) } elapsedTimer timer; - if (ls != NULL) { + if (ls != nullptr) { ls->print_cr("begin deflating: in_use_list stats: ceiling=" SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT, in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max()); timer.start(); @@ -1500,7 +1500,7 @@ size_t ObjectSynchronizer::deflate_idle_monitors(ObjectMonitorsHashtable* table) GrowableArray delete_list((int)deflated_count); unlinked_count = _in_use_list.unlink_deflated(current, ls, &timer, &delete_list); if (current->is_Java_thread()) { - if (ls != NULL) { + if (ls != nullptr) { timer.stop(); ls->print_cr("before handshaking: unlinked_count=" SIZE_FORMAT ", in_use_list stats: ceiling=" SIZE_FORMAT ", count=" @@ -1514,7 +1514,7 @@ size_t ObjectSynchronizer::deflate_idle_monitors(ObjectMonitorsHashtable* table) HandshakeForDeflation hfd_hc; Handshake::execute(&hfd_hc); - if (ls != NULL) { + if (ls != nullptr) { ls->print_cr("after handshaking: in_use_list stats: ceiling=" SIZE_FORMAT ", count=" SIZE_FORMAT ", max=" SIZE_FORMAT, in_use_list_ceiling(), _in_use_list.count(), _in_use_list.max()); @@ -1537,7 +1537,7 @@ size_t ObjectSynchronizer::deflate_idle_monitors(ObjectMonitorsHashtable* table) assert(unlinked_count == deleted_count, "must be"); } - if (ls != NULL) { + if (ls != nullptr) { timer.stop(); if (deflated_count != 0 || unlinked_count != 0 || log_is_enabled(Debug, monitorinflation)) { ls->print_cr("deflated_count=" SIZE_FORMAT ", {unlinked,deleted}_count=" SIZE_FORMAT " monitors in %3.7f secs", @@ -1681,7 +1681,7 @@ void ObjectSynchronizer::audit_and_print_stats(bool on_exit) { LogStreamHandle(Debug, monitorinflation) lsh_debug; LogStreamHandle(Info, monitorinflation) lsh_info; LogStreamHandle(Trace, monitorinflation) lsh_trace; - LogStream* ls = NULL; + LogStream* ls = nullptr; if (log_is_enabled(Trace, monitorinflation)) { ls = &lsh_trace; } else if (log_is_enabled(Debug, monitorinflation)) { @@ -1689,7 +1689,7 @@ void ObjectSynchronizer::audit_and_print_stats(bool on_exit) { } else if (log_is_enabled(Info, monitorinflation)) { ls = &lsh_info; } - assert(ls != NULL, "sanity check"); + assert(ls != nullptr, "sanity check"); int error_cnt = 0; @@ -1760,11 +1760,11 @@ void ObjectSynchronizer::chk_in_use_entry(ObjectMonitor* n, outputStream* out, } if (n->header().value() == 0) { 
out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor must " - "have non-NULL _header field.", p2i(n)); + "have non-null _header field.", p2i(n)); *error_cnt_p = *error_cnt_p + 1; } const oop obj = n->object_peek(); - if (obj != NULL) { + if (obj != nullptr) { const markWord mark = obj->mark(); if (!mark.has_monitor()) { out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use monitor's " @@ -1802,8 +1802,8 @@ void ObjectSynchronizer::log_in_use_monitor_details(outputStream* out) { const markWord mark = mid->header(); ResourceMark rm; out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(mid), - mid->is_busy(), mark.hash() != 0, mid->owner() != NULL, - p2i(obj), obj == NULL ? "" : obj->klass()->external_name()); + mid->is_busy(), mark.hash() != 0, mid->owner() != nullptr, + p2i(obj), obj == nullptr ? "" : obj->klass()->external_name()); if (mid->is_busy()) { out->print(" (%s)", mid->is_busy_to_string(&ss)); ss.reset(); diff --git a/src/hotspot/share/runtime/synchronizer.hpp b/src/hotspot/share/runtime/synchronizer.hpp index 9592d1a43f7a5..06eb40dc504c2 100644 --- a/src/hotspot/share/runtime/synchronizer.hpp +++ b/src/hotspot/share/runtime/synchronizer.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -109,7 +109,7 @@ class MonitorList::Iterator { public: Iterator(ObjectMonitor* head) : _current(head) {} - bool has_next() const { return _current != NULL; } + bool has_next() const { return _current != nullptr; } ObjectMonitor* next(); }; diff --git a/src/hotspot/share/runtime/task.cpp b/src/hotspot/share/runtime/task.cpp index 12b15bcdf7761..d38c1d145d03c 100644 --- a/src/hotspot/share/runtime/task.cpp +++ b/src/hotspot/share/runtime/task.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -86,7 +86,7 @@ void PeriodicTask::enroll() { // not already own the PeriodicTask_lock. Otherwise, we don't try to // enter it again because VM internal Mutexes do not support recursion. // - MutexLocker ml(PeriodicTask_lock->owned_by_self() ? NULL : PeriodicTask_lock); + MutexLocker ml(PeriodicTask_lock->owned_by_self() ? nullptr : PeriodicTask_lock); if (_num_tasks == PeriodicTask::max_tasks) { fatal("Overflow in PeriodicTask table"); @@ -95,7 +95,7 @@ void PeriodicTask::enroll() { } WatcherThread* thread = WatcherThread::watcher_thread(); - if (thread != NULL) { + if (thread != nullptr) { thread->unpark(); } else { WatcherThread::start(); @@ -108,7 +108,7 @@ void PeriodicTask::disenroll() { // not already own the PeriodicTask_lock. Otherwise, we don't try to // enter it again because VM internal Mutexes do not support recursion. // - MutexLocker ml(PeriodicTask_lock->owned_by_self() ? NULL : PeriodicTask_lock); + MutexLocker ml(PeriodicTask_lock->owned_by_self() ? 
nullptr : PeriodicTask_lock); int index; for(index = 0; index < _num_tasks && _tasks[index] != this; index++) diff --git a/src/hotspot/share/runtime/thread.cpp b/src/hotspot/share/runtime/thread.cpp index 18ae011444215..2d8e0291a430a 100644 --- a/src/hotspot/share/runtime/thread.cpp +++ b/src/hotspot/share/runtime/thread.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -54,7 +54,7 @@ #ifndef USE_LIBRARY_BASED_TLS_ONLY // Current thread is maintained as a thread-local variable -THREAD_LOCAL Thread* Thread::_thr_current = NULL; +THREAD_LOCAL Thread* Thread::_thr_current = nullptr; #endif // ======= Thread ======== @@ -70,31 +70,31 @@ void Thread::operator delete(void* p) { // Base class for all threads: VMThread, WatcherThread, ConcurrentMarkSweepThread, // JavaThread -DEBUG_ONLY(Thread* Thread::_starting_thread = NULL;) +DEBUG_ONLY(Thread* Thread::_starting_thread = nullptr;) Thread::Thread() { DEBUG_ONLY(_run_state = PRE_CALL_RUN;) // stack and get_thread - set_stack_base(NULL); + set_stack_base(nullptr); set_stack_size(0); set_lgrp_id(-1); DEBUG_ONLY(clear_suspendible_thread();) // allocated data structures - set_osthread(NULL); + set_osthread(nullptr); set_resource_area(new (mtThread)ResourceArea()); - DEBUG_ONLY(_current_resource_mark = NULL;) - set_handle_area(new (mtThread) HandleArea(NULL)); + DEBUG_ONLY(_current_resource_mark = nullptr;) + set_handle_area(new (mtThread) HandleArea(nullptr)); set_metadata_handles(new (mtClass) GrowableArray<Metadata*>(30, mtClass)); - set_last_handle_mark(NULL); - DEBUG_ONLY(_missed_ic_stub_refill_verifier = NULL); + set_last_handle_mark(nullptr); + DEBUG_ONLY(_missed_ic_stub_refill_verifier = nullptr); // Initial value of zero ==> never claimed. _threads_do_token = 0; - _threads_hazard_ptr = NULL; - _threads_list_ptr = NULL; + _threads_hazard_ptr = nullptr; + _threads_list_ptr = nullptr; _nested_threads_hazard_ptr_cnt = 0; _rcu_counter = 0; @@ -102,11 +102,11 @@ Thread::Thread() { new HandleMark(this); // plain initialization - debug_only(_owned_locks = NULL;) + debug_only(_owned_locks = nullptr;) NOT_PRODUCT(_skip_gcalot = false;) _jvmti_env_iteration_count = 0; set_allocated_bytes(0); - _current_pending_raw_monitor = NULL; + _current_pending_raw_monitor = nullptr; // thread-specific hashCode stream generator state - Marsaglia shift-xor form _hashStateX = os::random(); @@ -134,14 +134,14 @@ Thread::Thread() { // BarrierSet::on_thread_create() for this thread is therefore deferred // to BarrierSet::set_barrier_set(). BarrierSet* const barrier_set = BarrierSet::barrier_set(); - if (barrier_set != NULL) { + if (barrier_set != nullptr) { barrier_set->on_thread_create(this); } else { // Only the main thread should be created before the barrier set // and that happens just before Thread::current is set. No other thread // can attach as the VM is not created yet, so they can't execute this code. // If the main thread creates other threads before the barrier set that is an error.
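
The PeriodicTask::enroll()/disenroll() hunks above (and the ThreadsSMRSupport::is_a_protected_JavaThread_with_lock() hunk later in this patch) pass "PeriodicTask_lock->owned_by_self() ? nullptr : PeriodicTask_lock" to MutexLocker: handing MutexLocker a null Mutex* makes it a no-op, which is how HotSpot avoids re-entering a lock it already holds, since VM-internal Mutexes are not recursive. A minimal sketch of the idiom, assuming only that no-op behavior (do_periodic_task_work is a hypothetical caller, not part of this patch):

    // Illustrative sketch, not part of this patch.
    void do_periodic_task_work() {
      // Lock PeriodicTask_lock unless this thread already owns it;
      // passing nullptr tells MutexLocker to skip lock/unlock entirely.
      MutexLocker ml(PeriodicTask_lock->owned_by_self() ? nullptr : PeriodicTask_lock);
      // ... code that requires PeriodicTask_lock to be held ...
    }
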
- assert(Thread::current_or_null() == NULL, "creating thread before barrier set"); + assert(Thread::current_or_null() == nullptr, "creating thread before barrier set"); } MACOS_AARCH64_ONLY(DEBUG_ONLY(_wx_init = false)); @@ -155,10 +155,10 @@ void Thread::initialize_tlab() { void Thread::initialize_thread_current() { #ifndef USE_LIBRARY_BASED_TLS_ONLY - assert(_thr_current == NULL, "Thread::current already initialized"); + assert(_thr_current == nullptr, "Thread::current already initialized"); _thr_current = this; #endif - assert(ThreadLocalStorage::thread() == NULL, "ThreadLocalStorage::thread already initialized"); + assert(ThreadLocalStorage::thread() == nullptr, "ThreadLocalStorage::thread already initialized"); ThreadLocalStorage::set_thread(this); assert(Thread::current() == ThreadLocalStorage::thread(), "TLS mismatch!"); } @@ -166,9 +166,9 @@ void Thread::initialize_thread_current() { void Thread::clear_thread_current() { assert(Thread::current() == ThreadLocalStorage::thread(), "TLS mismatch!"); #ifndef USE_LIBRARY_BASED_TLS_ONLY - _thr_current = NULL; + _thr_current = nullptr; #endif - ThreadLocalStorage::set_thread(NULL); + ThreadLocalStorage::set_thread(nullptr); } void Thread::record_stack_base_and_size() { @@ -199,7 +199,7 @@ void Thread::call_run() { // At this point, Thread object should be fully initialized and // Thread::current() should be set. - assert(Thread::current_or_null() != NULL, "current thread is unset"); + assert(Thread::current_or_null() != nullptr, "current thread is unset"); assert(Thread::current_or_null() == this, "current thread is wrong"); // Perform common initialization actions @@ -226,7 +226,7 @@ void Thread::call_run() { // Perform common tear-down actions - assert(Thread::current_or_null() != NULL, "current thread is unset"); + assert(Thread::current_or_null() != nullptr, "current thread is unset"); assert(Thread::current_or_null() == this, "current thread is wrong"); // Perform tear-down actions @@ -239,7 +239,7 @@ void Thread::call_run() { // asynchronously with respect to its termination - that is what _run_state can // be used to check. - assert(Thread::current_or_null() == NULL, "current thread still present"); + assert(Thread::current_or_null() == nullptr, "current thread still present"); } Thread::~Thread() { @@ -254,7 +254,7 @@ Thread::~Thread() { // Notify the barrier set that a thread is being destroyed. Note that a barrier // set might not be available if we encountered errors during bootstrapping. BarrierSet* const barrier_set = BarrierSet::barrier_set(); - if (barrier_set != NULL) { + if (barrier_set != nullptr) { barrier_set->on_thread_destroy(this); } @@ -262,19 +262,19 @@ Thread::~Thread() { delete resource_area(); // since the handle marks are using the handle area, we have to deallocated the root // handle mark before deallocating the thread's handle area, - assert(last_handle_mark() != NULL, "check we have an element"); + assert(last_handle_mark() != nullptr, "check we have an element"); delete last_handle_mark(); - assert(last_handle_mark() == NULL, "check we have reached the end"); + assert(last_handle_mark() == nullptr, "check we have reached the end"); ParkEvent::Release(_ParkEvent); - // Set to NULL as a termination indicator for has_terminated(). - Atomic::store(&_ParkEvent, (ParkEvent*)NULL); + // Set to null as a termination indicator for has_terminated(). + Atomic::store(&_ParkEvent, (ParkEvent*)nullptr); delete handle_area(); delete metadata_handles(); - // osthread() can be NULL, if creation of thread failed. 
- if (osthread() != NULL) os::free_thread(osthread()); + // osthread() can be nullptr, if creation of thread failed. + if (osthread() != nullptr) os::free_thread(osthread()); // Clear Thread::current if thread is deleting itself and it has not // already been done. This must be done before the memory is deallocated. @@ -315,7 +315,7 @@ bool Thread::is_JavaThread_protected(const JavaThread* target) { // If the target hasn't been started yet then it is trivially // "protected". We assume the caller is the thread that will do // the starting. - if (target->osthread() == NULL || target->osthread()->get_state() <= INITIALIZED) { + if (target->osthread() == nullptr || target->osthread()->get_state() <= INITIALIZED) { return true; } @@ -357,7 +357,7 @@ bool Thread::is_JavaThread_protected_by_TLH(const JavaThread* target) { // Check the ThreadsLists associated with the calling thread (if any) // to see if one of them protects the target JavaThread: for (SafeThreadsListPtr* stlp = current_thread->_threads_list_ptr; - stlp != NULL; stlp = stlp->previous()) { + stlp != nullptr; stlp = stlp->previous()) { if (stlp->list()->includes(target)) { // The target JavaThread is protected by this ThreadsList: return true; @@ -417,17 +417,17 @@ class RememberProcessedThread: public StackObj { Thread* self = Thread::current(); if (self->is_Named_thread()) { _cur_thr = (NamedThread *)self; - assert(_cur_thr->processed_thread() == NULL, "nesting not supported"); + assert(_cur_thr->processed_thread() == nullptr, "nesting not supported"); _cur_thr->set_processed_thread(thread); } else { - _cur_thr = NULL; + _cur_thr = nullptr; } } ~RememberProcessedThread() { if (_cur_thr) { - assert(_cur_thr->processed_thread() != NULL, "nesting not supported"); - _cur_thr->set_processed_thread(NULL); + assert(_cur_thr->processed_thread() != nullptr, "nesting not supported"); + _cur_thr->set_processed_thread(nullptr); } } }; @@ -441,7 +441,7 @@ void Thread::oops_do(OopClosure* f, CodeBlobClosure* cf) { void Thread::metadata_handles_do(void f(Metadata*)) { // Only walk the Handles in Thread. 
- if (metadata_handles() != NULL) { + if (metadata_handles() != nullptr) { for (int i = 0; i< metadata_handles()->length(); i++) { f(metadata_handles()->at(i)); } @@ -450,7 +450,7 @@ void Thread::metadata_handles_do(void f(Metadata*)) { void Thread::print_on(outputStream* st, bool print_extended_info) const { // get_priority assumes osthread initialized - if (osthread() != NULL) { + if (osthread() != nullptr) { int os_prio; if (os::get_native_priority(this, &os_prio) == OS_OK) { st->print("os_prio=%d ", os_prio); @@ -491,7 +491,7 @@ void Thread::print_on_error(outputStream* st, char* buf, int buflen) const { st->print("%s \"%s\"", type_name(), name()); OSThread* os_thr = osthread(); - if (os_thr != NULL) { + if (os_thr != nullptr) { if (os_thr->get_state() != ZOMBIE) { st->print(" [stack: " PTR_FORMAT "," PTR_FORMAT "]", p2i(stack_end()), p2i(stack_base())); @@ -515,7 +515,7 @@ void Thread::print_value_on(outputStream* st) const { #ifdef ASSERT void Thread::print_owned_locks_on(outputStream* st) const { Mutex* cur = _owned_locks; - if (cur == NULL) { + if (cur == nullptr) { st->print(" (no locks) "); } else { st->print_cr(" Locks owned:"); @@ -537,7 +537,7 @@ bool Thread::is_lock_owned(address adr) const { } bool Thread::set_as_starting_thread() { - assert(_starting_thread == NULL, "already initialized: " + assert(_starting_thread == nullptr, "already initialized: " "_starting_thread=" INTPTR_FORMAT, p2i(_starting_thread)); // NOTE: this must be called inside the main thread. DEBUG_ONLY(_starting_thread = this;) diff --git a/src/hotspot/share/runtime/thread.hpp b/src/hotspot/share/runtime/thread.hpp index eb4ba39e443c7..b057413910ded 100644 --- a/src/hotspot/share/runtime/thread.hpp +++ b/src/hotspot/share/runtime/thread.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -345,11 +345,11 @@ class Thread: public ThreadShadow { // and logging. virtual const char* type_name() const { return "Thread"; } - // Returns the current thread (ASSERTS if NULL) + // Returns the current thread (ASSERTS if nullptr) static inline Thread* current(); - // Returns the current thread, or NULL if not attached + // Returns the current thread, or null if not attached static inline Thread* current_or_null(); - // Returns the current thread, or NULL if not attached, and is + // Returns the current thread, or null if not attached, and is // safe for use from signal-handlers static inline Thread* current_or_null_safe(); @@ -435,7 +435,7 @@ class Thread: public ThreadShadow { // GC support // Apply "f->do_oop" to all root oops in "this". // Used by JavaThread::oops_do. 
- // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames + // Apply "cf->do_code_blob" (if !nullptr) to all code blobs active in frames virtual void oops_do_no_frames(OopClosure* f, CodeBlobClosure* cf); virtual void oops_do_frames(OopClosure* f, CodeBlobClosure* cf) {} void oops_do(OopClosure* f, CodeBlobClosure* cf); @@ -535,7 +535,7 @@ class Thread: public ThreadShadow { public: // Stack overflow support - address stack_base() const { assert(_stack_base != NULL,"Sanity check"); return _stack_base; } + address stack_base() const { assert(_stack_base != nullptr,"Sanity check"); return _stack_base; } void set_stack_base(address base) { _stack_base = base; } size_t stack_size() const { return _stack_size; } void set_stack_size(size_t size) { _stack_size = size; } @@ -569,7 +569,7 @@ class Thread: public ThreadShadow { void print_owned_locks_on(outputStream* st) const; void print_owned_locks() const { print_owned_locks_on(tty); } Mutex* owned_locks() const { return _owned_locks; } - bool owns_locks() const { return owned_locks() != NULL; } + bool owns_locks() const { return owned_locks() != nullptr; } // Deadlock detection ResourceMark* current_resource_mark() { return _current_resource_mark; } @@ -605,9 +605,9 @@ class Thread: public ThreadShadow { // and ObjectSynchronizer::read_stable_mark // Termination indicator used by the signal handler. - // _ParkEvent is just a convenient field we can NULL out after setting the JavaThread termination state + // _ParkEvent is just a convenient field we can null out after setting the JavaThread termination state // (which can't itself be read from the signal handler if a signal hits during the Thread destructor). - bool has_terminated() { return Atomic::load(&_ParkEvent) == NULL; }; + bool has_terminated() { return Atomic::load(&_ParkEvent) == nullptr; }; jint _hashStateW; // Marsaglia Shift-XOR thread-local RNG jint _hashStateX; // thread-specific hashCode generator state @@ -636,7 +636,7 @@ class Thread: public ThreadShadow { // Inline implementation of Thread::current() inline Thread* Thread::current() { Thread* current = current_or_null(); - assert(current != NULL, "Thread::current() called on detached thread"); + assert(current != nullptr, "Thread::current() called on detached thread"); return current; } @@ -647,7 +647,7 @@ inline Thread* Thread::current_or_null() { if (ThreadLocalStorage::is_initialized()) { return ThreadLocalStorage::thread(); } - return NULL; + return nullptr; #endif } @@ -655,7 +655,7 @@ inline Thread* Thread::current_or_null_safe() { if (ThreadLocalStorage::is_initialized()) { return ThreadLocalStorage::thread(); } - return NULL; + return nullptr; } #endif // SHARE_RUNTIME_THREAD_HPP diff --git a/src/hotspot/share/runtime/threadSMR.cpp b/src/hotspot/share/runtime/threadSMR.cpp index 303f137fb5026..dd64b129308f3 100644 --- a/src/hotspot/share/runtime/threadSMR.cpp +++ b/src/hotspot/share/runtime/threadSMR.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -127,7 +127,7 @@ volatile uint ThreadsSMRSupport::_tlh_time_max = 0; // isn't available everywhere (or is it?). 
volatile uint ThreadsSMRSupport::_tlh_times = 0; -ThreadsList* ThreadsSMRSupport::_to_delete_list = NULL; +ThreadsList* ThreadsSMRSupport::_to_delete_list = nullptr; // # of parallel ThreadsLists on the to-delete list. // Impl note: Hard to imagine > 64K ThreadsLists needing to be deleted so @@ -209,7 +209,7 @@ class ThreadScanHashtable : public CHeapObj<mtThread> { bool has_entry(void *pointer) { int *val_ptr = _ptrs->get(pointer); - return val_ptr != NULL && *val_ptr == 1; + return val_ptr != nullptr && *val_ptr == 1; } void add_entry(void *pointer) { @@ -253,16 +253,16 @@ class ScanHazardPtrGatherProtectedThreadsClosure : public ThreadClosure { virtual void do_thread(Thread *thread) { assert_locked_or_safepoint(Threads_lock); - if (thread == NULL) return; + if (thread == nullptr) return; // This code races with ThreadsSMRSupport::acquire_stable_list() which // is lock-free so we have to handle some special situations. // - ThreadsList *current_list = NULL; + ThreadsList *current_list = nullptr; while (true) { current_list = thread->get_threads_hazard_ptr(); // No hazard ptr so nothing more to do. - if (current_list == NULL) { + if (current_list == nullptr) { return; } @@ -277,7 +277,7 @@ class ScanHazardPtrGatherProtectedThreadsClosure : public ThreadClosure { // thread will retry the attempt to publish a stable hazard ptr. // If we lose the race, then we retry our attempt to look at the // hazard ptr. - if (thread->cmpxchg_threads_hazard_ptr(NULL, current_list) == current_list) return; + if (thread->cmpxchg_threads_hazard_ptr(nullptr, current_list) == current_list) return; } assert(ThreadsList::is_valid(current_list), "current_list=" @@ -308,9 +308,9 @@ class ScanHazardPtrGatherThreadsListClosure : public ThreadClosure { virtual void do_thread(Thread* thread) { assert_locked_or_safepoint(Threads_lock); - if (thread == NULL) return; + if (thread == nullptr) return; ThreadsList *hazard_ptr = thread->get_threads_hazard_ptr(); - if (hazard_ptr == NULL) return; + if (hazard_ptr == nullptr) return; #ifdef ASSERT if (!Thread::is_hazard_ptr_tagged(hazard_ptr)) { // We only validate hazard_ptrs that are not tagged since a tagged @@ -344,9 +344,9 @@ class ScanHazardPtrPrintMatchingThreadsClosure : public ThreadClosure { virtual void do_thread(Thread *thread) { assert_locked_or_safepoint(Threads_lock); - if (thread == NULL) return; + if (thread == nullptr) return; ThreadsList *current_list = thread->get_threads_hazard_ptr(); - if (current_list == NULL) { + if (current_list == nullptr) { return; } // If the hazard ptr is unverified, then ignore it. @@ -358,7 +358,7 @@ class ScanHazardPtrPrintMatchingThreadsClosure : public ThreadClosure { // the hazard ptr is protecting all the JavaThreads on that // ThreadsList, but we only care about matching a specific JavaThread.
JavaThreadIterator jti(current_list); - for (JavaThread *p = jti.first(); p != NULL; p = jti.next()) { + for (JavaThread *p = jti.first(); p != nullptr; p = jti.next()) { if (p == _thread) { log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::smr_delete: thread1=" INTPTR_FORMAT " has a hazard pointer for thread2=" INTPTR_FORMAT, os::current_thread_id(), p2i(thread), p2i(_thread)); break; @@ -376,9 +376,9 @@ class ValidateHazardPtrsClosure : public ThreadClosure { virtual void do_thread(Thread* thread) { assert_locked_or_safepoint(Threads_lock); - if (thread == NULL) return; + if (thread == nullptr) return; ThreadsList *hazard_ptr = thread->get_threads_hazard_ptr(); - if (hazard_ptr == NULL) return; + if (hazard_ptr == nullptr) return; // If the hazard ptr is unverified, then ignore it since it could // be deleted at any time now. if (Thread::is_hazard_ptr_tagged(hazard_ptr)) return; @@ -412,12 +412,12 @@ class VerifyHazardPtrThreadClosure : public ThreadClosure { // Acquire a stable ThreadsList. // void SafeThreadsListPtr::acquire_stable_list() { - assert(_thread != NULL, "sanity check"); + assert(_thread != nullptr, "sanity check"); _needs_release = true; _previous = _thread->_threads_list_ptr; _thread->_threads_list_ptr = this; - if (_thread->get_threads_hazard_ptr() == NULL && _previous == NULL) { + if (_thread->get_threads_hazard_ptr() == nullptr && _previous == nullptr) { // The typical case is first. acquire_stable_list_fast_path(); return; @@ -430,8 +430,8 @@ void SafeThreadsListPtr::acquire_stable_list() { // Fast path way to acquire a stable ThreadsList. // void SafeThreadsListPtr::acquire_stable_list_fast_path() { - assert(_thread != NULL, "sanity check"); - assert(_thread->get_threads_hazard_ptr() == NULL, "sanity check"); + assert(_thread != nullptr, "sanity check"); + assert(_thread->get_threads_hazard_ptr() == nullptr, "sanity check"); ThreadsList* threads; @@ -483,7 +483,7 @@ void SafeThreadsListPtr::acquire_stable_list_fast_path() { // reference counting. // void SafeThreadsListPtr::acquire_stable_list_nested_path() { - assert(_thread != NULL, "sanity check"); + assert(_thread != nullptr, "sanity check"); // The thread already has a hazard ptr (ThreadsList ref) so we need // to create a nested ThreadsListHandle with the current ThreadsList @@ -504,7 +504,7 @@ void SafeThreadsListPtr::acquire_stable_list_nested_path() { } // Clear the hazard ptr so we can go through the fast path below and // acquire a nested stable ThreadsList. - _thread->set_threads_hazard_ptr(NULL); + _thread->set_threads_hazard_ptr(nullptr); if (EnableThreadSMRStatistics && _thread->nested_threads_hazard_ptr_cnt() > ThreadsSMRSupport::_nested_thread_list_max) { ThreadsSMRSupport::_nested_thread_list_max = _thread->nested_threads_hazard_ptr_cnt(); @@ -520,15 +520,15 @@ void SafeThreadsListPtr::acquire_stable_list_nested_path() { // Release a stable ThreadsList. // void SafeThreadsListPtr::release_stable_list() { - assert(_thread != NULL, "sanity check"); + assert(_thread != nullptr, "sanity check"); assert(_thread->_threads_list_ptr == this, "sanity check"); _thread->_threads_list_ptr = _previous; // We're releasing either a leaf or nested ThreadsListHandle. In either - // case, we set this thread's hazard ptr back to NULL and we do it before + // case, we set this thread's hazard ptr back to null and we do it before // _nested_handle_cnt is decremented below. 
- _thread->set_threads_hazard_ptr(NULL); - if (_previous != NULL) { + _thread->set_threads_hazard_ptr(nullptr); + if (_previous != nullptr) { // The ThreadsListHandle being released is a nested ThreadsListHandle. if (EnableThreadSMRStatistics) { _thread->dec_nested_threads_hazard_ptr_cnt(); @@ -559,7 +559,7 @@ void SafeThreadsListPtr::release_stable_list() { // An exiting thread might be waiting in smr_delete(); we need to // check with delete_lock to be sure. ThreadsSMRSupport::release_stable_list_wake_up(_has_ref_count); - assert(_previous == NULL || ThreadsList::is_valid(_previous->_list), + assert(_previous == nullptr || ThreadsList::is_valid(_previous->_list), "_previous->_list=" INTPTR_FORMAT " is not valid after calling release_stable_list_wake_up!", p2i(_previous->_list)); @@ -571,7 +571,7 @@ void SafeThreadsListPtr::release_stable_list() { // the Thread-SMR protocol. void SafeThreadsListPtr::verify_hazard_ptr_scanned() { #ifdef ASSERT - assert(_list != NULL, "_list must not be NULL"); + assert(_list != nullptr, "_list must not be null"); if (ThreadsSMRSupport::is_bootstrap_list(_list)) { // We are early in VM bootstrapping so nothing to do here. @@ -620,16 +620,16 @@ void SafeThreadsListPtr::verify_hazard_ptr_scanned() { // Shared singleton data for all ThreadsList(0) instances. // Used by _bootstrap_list to avoid static init time heap allocation. -// No real entries, just the final NULL terminator. +// No real entries, just the final nullptr terminator. static JavaThread* const empty_threads_list_data[1] = {}; -// Result has 'entries + 1' elements, with the last being the NULL terminator. +// Result has 'entries + 1' elements, with the last being the null terminator. static JavaThread* const* make_threads_list_data(int entries) { if (entries == 0) { return empty_threads_list_data; } JavaThread** data = NEW_C_HEAP_ARRAY(JavaThread*, entries + 1, mtThread); - data[entries] = NULL; // Make sure the final entry is NULL. + data[entries] = nullptr; // Make sure the final entry is null. return data; } @@ -659,7 +659,7 @@ void ThreadsList::Iterator::assert_same_list(Iterator i) const { ThreadsList::ThreadsList(int entries) : _magic(THREADS_LIST_MAGIC), _length(entries), - _next_list(NULL), + _next_list(nullptr), _threads(make_threads_list_data(entries)), _nested_handle_cnt(0) {} @@ -693,7 +693,7 @@ void ThreadsList::dec_nested_handle_cnt() { } int ThreadsList::find_index_of_JavaThread(JavaThread *target) { - if (target == NULL) { + if (target == nullptr) { return -1; } for (uint i = 0; i < length(); i++) { @@ -707,7 +707,7 @@ int ThreadsList::find_index_of_JavaThread(JavaThread *target) { JavaThread* ThreadsList::find_JavaThread_from_java_tid(jlong java_tid) const { ThreadIdTable::lazy_initialize(this); JavaThread* thread = ThreadIdTable::find_thread_by_tid(java_tid); - if (thread == NULL) { + if (thread == nullptr) { // If the thread is not found in the table find it // with a linear search and add to the table. for (uint i = 0; i < length(); i++) { @@ -715,7 +715,7 @@ JavaThread* ThreadsList::find_JavaThread_from_java_tid(jlong java_tid) const { oop tobj = thread->threadObj(); // Ignore the thread if it hasn't run yet, has exited // or is starting to exit. - if (tobj != NULL && java_tid == java_lang_Thread::thread_id(tobj)) { + if (tobj != nullptr && java_tid == java_lang_Thread::thread_id(tobj)) { MutexLocker ml(Threads_lock); // Must be inside the lock to ensure that we don't add a thread to the table // that has just passed the removal point in Threads::remove(). 
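
make_threads_list_data() above allocates 'entries + 1' slots and nulls the final one, so the backing array of a ThreadsList snapshot can be walked without consulting _length. A sketch of what the terminator permits (illustrative only; walk_snapshot and visit are hypothetical names, not part of this patch):

    // Illustrative sketch, not part of this patch: walking the
    // null-terminated backing array of a ThreadsList snapshot.
    static void walk_snapshot(JavaThread* const* data) {
      for (JavaThread* const* cur = data; *cur != nullptr; cur++) {
        visit(*cur);  // hypothetical per-thread callback
      }
    }
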
@@ -728,7 +728,7 @@ JavaThread* ThreadsList::find_JavaThread_from_java_tid(jlong java_tid) const { } else if (!thread->is_exiting()) { return thread; } - return NULL; + return nullptr; } void ThreadsList::inc_nested_handle_cnt() { @@ -736,7 +736,7 @@ void ThreadsList::inc_nested_handle_cnt() { } bool ThreadsList::includes(const JavaThread * const p) const { - if (p == NULL) { + if (p == nullptr) { return false; } for (uint i = 0; i < length(); i++) { @@ -790,7 +790,7 @@ ThreadsListHandle::~ThreadsListHandle() { // associated ThreadsList. This ThreadsListHandle "protects" the // returned JavaThread *. // -// If thread_oop_p is not NULL, then the caller wants to use the oop +// If thread_oop_p is not null, then the caller wants to use the oop // after this call so the oop is returned. On success, *jt_pp is set // to the converted JavaThread * and true is returned. On error, // returns false. @@ -798,25 +798,25 @@ ThreadsListHandle::~ThreadsListHandle() { bool ThreadsListHandle::cv_internal_thread_to_JavaThread(jobject jthread, JavaThread ** jt_pp, oop * thread_oop_p) { - assert(this->list() != NULL, "must have a ThreadsList"); - assert(jt_pp != NULL, "must have a return JavaThread pointer"); + assert(this->list() != nullptr, "must have a ThreadsList"); + assert(jt_pp != nullptr, "must have a return JavaThread pointer"); // thread_oop_p is optional so no assert() - // The JVM_* interfaces don't allow a NULL thread parameter; JVM/TI - // allows a NULL thread parameter to signify "current thread" which + // The JVM_* interfaces don't allow a null thread parameter; JVM/TI + // allows a null thread parameter to signify "current thread" which // allows us to avoid calling cv_external_thread_to_JavaThread(). // The JVM_* interfaces have no such leeway. oop thread_oop = JNIHandles::resolve_non_null(jthread); // Looks like an oop at this point. - if (thread_oop_p != NULL) { + if (thread_oop_p != nullptr) { // Return the oop to the caller; the caller may still want // the oop even if this function returns false. *thread_oop_p = thread_oop; } JavaThread *java_thread = java_lang_Thread::thread(thread_oop); - if (java_thread == NULL) { + if (java_thread == nullptr) { // The java.lang.Thread does not contain a JavaThread * so it has // not yet run or it has died. return false; @@ -903,14 +903,14 @@ void ThreadsSMRSupport::free_list(ThreadsList* threads) { // Walk through the linked list of pending freeable ThreadsLists // and free the ones that are not referenced from hazard ptrs. ThreadsList* current = _to_delete_list; - ThreadsList* prev = NULL; - ThreadsList* next = NULL; + ThreadsList* prev = nullptr; + ThreadsList* next = nullptr; bool threads_is_freed = false; - while (current != NULL) { + while (current != nullptr) { next = current->next_list(); if (!scan_table->has_entry((void*)current) && current->_nested_handle_cnt == 0) { // This ThreadsList is not referenced by a hazard ptr. - if (prev != NULL) { + if (prev != nullptr) { prev->set_next_list(next); } if (_to_delete_list == current) { @@ -960,7 +960,7 @@ bool ThreadsSMRSupport::is_a_protected_JavaThread(JavaThread *thread) { // and include the ones that are currently in use by a nested // ThreadsListHandle in the search set. ThreadsList* current = _to_delete_list; - while (current != NULL) { + while (current != nullptr) { if (current->_nested_handle_cnt != 0) { // 'current' is in use by a nested ThreadsListHandle so the hazard // ptr is protecting all the JavaThreads on that ThreadsList. 
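
The contract spelled out above for ThreadsListHandle::cv_internal_thread_to_JavaThread() (thread_oop_p is optional, and the oop is stored even when false is returned) implies callers shaped roughly like the following sketch; it assumes only that contract, with jthread being some valid JNI handle:

    // Illustrative sketch, not part of this patch.
    ThreadsListHandle tlh;
    JavaThread* jt = nullptr;
    oop thread_oop = nullptr;
    if (tlh.cv_internal_thread_to_JavaThread(jthread, &jt, &thread_oop)) {
      // 'jt' is protected by 'tlh' for the lifetime of the handle.
    } else {
      // 'thread_oop' was still returned: the thread has not yet run or has died.
    }
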
@@ -1075,7 +1075,7 @@ void ThreadsSMRSupport::wait_until_not_protected(JavaThread *thread) { ScanHazardPtrPrintMatchingThreadsClosure scan_cl(thread); threads_do(&scan_cl); ThreadsList* current = _to_delete_list; - while (current != NULL) { + while (current != nullptr) { if (current->_nested_handle_cnt != 0 && current->includes(thread)) { log_debug(thread, smr)("tid=" UINTX_FORMAT ": ThreadsSMRSupport::wait_until_not_protected: found nested hazard pointer to thread=" INTPTR_FORMAT, os::current_thread_id(), p2i(thread)); } @@ -1143,10 +1143,10 @@ void ThreadsSMRSupport::log_statistics() { // Print SMR info for a thread to a given output stream. void ThreadsSMRSupport::print_info_on(const Thread* thread, outputStream* st) { ThreadsList* hazard_ptr = thread->get_threads_hazard_ptr(); - if (hazard_ptr != NULL) { + if (hazard_ptr != nullptr) { st->print(" _threads_hazard_ptr=" INTPTR_FORMAT, p2i(hazard_ptr)); } - if (EnableThreadSMRStatistics && thread->_threads_list_ptr != NULL) { + if (EnableThreadSMRStatistics && thread->_threads_list_ptr != nullptr) { // The count is only interesting if we have a _threads_list_ptr. st->print(", _nested_threads_hazard_ptr_cnt=%u", thread->_nested_threads_hazard_ptr_cnt); } @@ -1154,11 +1154,11 @@ void ThreadsSMRSupport::print_info_on(const Thread* thread, outputStream* st) { // It is only safe to walk the list if we're at a safepoint or the // calling thread is walking its own list. SafeThreadsListPtr* current = thread->_threads_list_ptr; - if (current != NULL) { + if (current != nullptr) { // Skip the top nesting level as it is always printed above. current = current->previous(); } - while (current != NULL) { + while (current != nullptr) { current->print_on(st); current = current->previous(); } @@ -1175,7 +1175,7 @@ void ThreadsSMRSupport::print_info_on(outputStream* st) { needs_unlock = true; } - ThreadsList* saved_threads_list = NULL; + ThreadsList* saved_threads_list = nullptr; { ThreadsListHandle tlh; // make the current ThreadsList safe for reporting saved_threads_list = tlh.list(); // save for later comparison @@ -1187,7 +1187,7 @@ void ThreadsSMRSupport::print_info_on(outputStream* st) { st->print_cr("}"); } - if (_to_delete_list != NULL) { + if (_to_delete_list != nullptr) { if (Threads_lock->owned_by_self()) { // Only safe if we have the Threads_lock. st->print_cr("_to_delete_list=" INTPTR_FORMAT ", length=%u, elements={", @@ -1195,7 +1195,7 @@ void ThreadsSMRSupport::print_info_on(outputStream* st) { print_info_elements_on(st, _to_delete_list); st->print_cr("}"); for (ThreadsList *t_list = _to_delete_list->next_list(); - t_list != NULL; t_list = t_list->next_list()) { + t_list != nullptr; t_list = t_list->next_list()) { st->print("next-> " INTPTR_FORMAT ", length=%u, elements={", p2i(t_list), t_list->length()); print_info_elements_on(st, t_list); @@ -1254,7 +1254,7 @@ void ThreadsSMRSupport::print_info_on(outputStream* st) { void ThreadsSMRSupport::print_info_elements_on(outputStream* st, ThreadsList* t_list) { uint cnt = 0; JavaThreadIterator jti(t_list); - for (JavaThread *jt = jti.first(); jt != NULL; jt = jti.next()) { + for (JavaThread *jt = jti.first(); jt != nullptr; jt = jti.next()) { st->print(INTPTR_FORMAT, p2i(jt)); if (cnt < t_list->length() - 1) { // Separate with comma or comma-space except for the last one. 
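
The acquire_stable_list()/release_stable_list() pair above is normally driven by the ThreadsListHandle RAII type, whose usage examples appear in threadSMR.hpp below. In sketch form (illustrative only, using the range-for over ThreadsList seen elsewhere in this patch):

    // Illustrative sketch, not part of this patch.
    {
      ThreadsListHandle tlh;                // acquire_stable_list(): publish hazard ptr
      for (JavaThread* jt : *tlh.list()) {  // threads on the snapshot can't be freed
        // ... examine 'jt' safely ...
      }
    }                                       // release_stable_list(): clear hazard ptr and
                                            // wake an exiting thread waiting in smr_delete()
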
diff --git a/src/hotspot/share/runtime/threadSMR.hpp b/src/hotspot/share/runtime/threadSMR.hpp index 7760aeff16994..0cb6da8da9ca9 100644 --- a/src/hotspot/share/runtime/threadSMR.hpp +++ b/src/hotspot/share/runtime/threadSMR.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -52,8 +52,8 @@ class ThreadsList; // jobject jthread = ...; // : // ThreadsListHandle tlh; -// JavaThread* jt = NULL; -// bool is_alive = tlh.cv_internal_thread_to_JavaThread(jthread, &jt, NULL); +// JavaThread* jt = nullptr; +// bool is_alive = tlh.cv_internal_thread_to_JavaThread(jthread, &jt, nullptr); // if (is_alive) { // : // do stuff with 'jt'... // } @@ -61,9 +61,9 @@ class ThreadsList; // JVM/TI jthread example: // jthread thread = ...; // : -// JavaThread* jt = NULL; +// JavaThread* jt = nullptr; // ThreadsListHandle tlh; -// jvmtiError err = JvmtiExport::cv_external_thread_to_JavaThread(tlh.list(), thread, &jt, NULL); +// jvmtiError err = JvmtiExport::cv_external_thread_to_JavaThread(tlh.list(), thread, &jt, nullptr); // if (err != JVMTI_ERROR_NONE) { // return err; // } @@ -72,7 +72,7 @@ class ThreadsList; // JVM/TI oop example (this one should be very rare): // oop thread_obj = ...; // : -// JavaThread *jt = NULL; +// JavaThread *jt = nullptr; // ThreadsListHandle tlh; // jvmtiError err = JvmtiExport::cv_oop_to_JavaThread(tlh.list(), thread_obj, &jt); // if (err != JVMTI_ERROR_NONE) { @@ -260,9 +260,9 @@ class SafeThreadsListPtr { public: // Constructor that attaches the list onto a thread. SafeThreadsListPtr(Thread *thread, bool acquire) : - _previous(NULL), + _previous(nullptr), _thread(thread), - _list(NULL), + _list(nullptr), _has_ref_count(false), _needs_release(false) { @@ -337,7 +337,7 @@ class ThreadsListHandle : public StackObj { // specified ThreadsList using the following style: // // JavaThreadIterator jti(t_list); -// for (JavaThread *jt = jti.first(); jt != NULL; jt = jti.next()) { +// for (JavaThread *jt = jti.first(); jt != nullptr; jt = jti.next()) { // ... // } // @@ -347,7 +347,7 @@ class JavaThreadIterator : public StackObj { public: JavaThreadIterator(ThreadsList *list) : _list(list), _index(0) { - assert(list != NULL, "ThreadsList must not be NULL."); + assert(list != nullptr, "ThreadsList must not be null."); } JavaThread *first() { @@ -361,7 +361,7 @@ class JavaThreadIterator : public StackObj { JavaThread *next() { if (++_index >= length()) { - return NULL; + return nullptr; } return _list->thread_at(_index); } @@ -392,7 +392,7 @@ class JavaThreadIteratorWithHandle : public StackObj { JavaThread *next() { if (_index >= length()) { - return NULL; + return nullptr; } return _tlh.list()->thread_at(_index++); } diff --git a/src/hotspot/share/runtime/threadSMR.inline.hpp b/src/hotspot/share/runtime/threadSMR.inline.hpp index c8821a1562113..39b75b4dfa69b 100644 --- a/src/hotspot/share/runtime/threadSMR.inline.hpp +++ b/src/hotspot/share/runtime/threadSMR.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -140,7 +140,7 @@ inline ThreadsList* ThreadsSMRSupport::get_java_thread_list() { } inline bool ThreadsSMRSupport::is_a_protected_JavaThread_with_lock(JavaThread *thread) { - MutexLocker ml(Threads_lock->owned_by_self() ? NULL : Threads_lock); + MutexLocker ml(Threads_lock->owned_by_self() ? nullptr : Threads_lock); return is_a_protected_JavaThread(thread); } diff --git a/src/hotspot/share/runtime/threads.cpp b/src/hotspot/share/runtime/threads.cpp index 6e312501c4abd..a9a3d385e564b 100644 --- a/src/hotspot/share/runtime/threads.cpp +++ b/src/hotspot/share/runtime/threads.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -176,18 +176,18 @@ static void create_initial_thread(Handle thread_group, JavaThread* thread, static const char* get_java_version_info(InstanceKlass* ik, Symbol* field_name) { fieldDescriptor fd; - bool found = ik != NULL && + bool found = ik != nullptr && ik->find_local_field(field_name, vmSymbols::string_signature(), &fd); if (found) { oop name_oop = ik->java_mirror()->obj_field(fd.offset()); - if (name_oop == NULL) { - return NULL; + if (name_oop == nullptr) { + return nullptr; } const char* name = java_lang_String::as_utf8_string(name_oop); return name; } else { - return NULL; + return nullptr; } } @@ -217,7 +217,7 @@ bool Threads::_vm_complete = false; // The Java library method itself may be changed independently from the VM. static void call_postVMInitHook(TRAPS) { Klass* klass = SystemDictionary::resolve_or_null(vmSymbols::jdk_internal_vm_PostVMInitHook(), THREAD); - if (klass != NULL) { + if (klass != nullptr) { JavaValue result(T_VOID); JavaCalls::call_static(&result, klass, vmSymbols::run_method_name(), vmSymbols::void_method_signature(), @@ -519,7 +519,7 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) { JavaThread::_jvmci_old_thread_counters = NEW_C_HEAP_ARRAY(jlong, JVMCICounterSize, mtJVMCI); memset(JavaThread::_jvmci_old_thread_counters, 0, sizeof(jlong) * JVMCICounterSize); } else { - JavaThread::_jvmci_old_thread_counters = NULL; + JavaThread::_jvmci_old_thread_counters = nullptr; } #endif // INCLUDE_JVMCI @@ -817,8 +817,8 @@ extern "C" { static OnLoadEntry_t lookup_on_load(AgentLibrary* agent, const char *on_load_symbols[], size_t num_symbol_entries) { - OnLoadEntry_t on_load_entry = NULL; - void *library = NULL; + OnLoadEntry_t on_load_entry = nullptr; + void *library = nullptr; if (!agent->valid()) { char buffer[JVM_MAXPATHLEN]; @@ -831,13 +831,13 @@ static OnLoadEntry_t lookup_on_load(AgentLibrary* agent, library = agent->os_lib(); } else if (agent->is_absolute_path()) { library = os::dll_load(name, ebuf, sizeof ebuf); - if (library == NULL) { + if (library == nullptr) { const char *sub_msg = " in absolute path, with error: "; size_t len = strlen(msg) + strlen(name) + strlen(sub_msg) + strlen(ebuf) + 1; char *buf = NEW_C_HEAP_ARRAY(char, len, mtThread); jio_snprintf(buf, len, "%s%s%s%s", msg, name, sub_msg, ebuf); // If we can't find the agent, exit. 
- vm_exit_during_initialization(buf, NULL); + vm_exit_during_initialization(buf, nullptr); FREE_C_HEAP_ARRAY(char, buf); } } else { @@ -846,11 +846,11 @@ static OnLoadEntry_t lookup_on_load(AgentLibrary* agent, name)) { library = os::dll_load(buffer, ebuf, sizeof ebuf); } - if (library == NULL) { // Try the library path directory. + if (library == nullptr) { // Try the library path directory. if (os::dll_build_name(buffer, sizeof(buffer), name)) { library = os::dll_load(buffer, ebuf, sizeof ebuf); } - if (library == NULL) { + if (library == nullptr) { const char *sub_msg = " on the library path, with error: "; const char *sub_msg2 = "\nModule java.instrument may be missing from runtime image."; @@ -863,7 +863,7 @@ static OnLoadEntry_t lookup_on_load(AgentLibrary* agent, jio_snprintf(buf, len, "%s%s%s%s%s", msg, name, sub_msg, ebuf, sub_msg2); } // If we can't find the agent, exit. - vm_exit_during_initialization(buf, NULL); + vm_exit_during_initialization(buf, nullptr); FREE_C_HEAP_ARRAY(char, buf); } } @@ -901,15 +901,15 @@ void Threads::convert_vm_init_libraries_to_agents() { AgentLibrary* agent; AgentLibrary* next; - for (agent = Arguments::libraries(); agent != NULL; agent = next) { + for (agent = Arguments::libraries(); agent != nullptr; agent = next) { next = agent->next(); // cache the next agent now as this agent may get moved off this list OnLoadEntry_t on_load_entry = lookup_jvm_on_load(agent); // If there is an JVM_OnLoad function it will get called later, // otherwise see if there is an Agent_OnLoad - if (on_load_entry == NULL) { + if (on_load_entry == nullptr) { on_load_entry = lookup_agent_on_load(agent); - if (on_load_entry != NULL) { + if (on_load_entry != nullptr) { // switch it to the agent list -- so that Agent_OnLoad will be called, // JVM_OnLoad won't be attempted and Agent_OnUnload will Arguments::convert_library_to_agent(agent); @@ -929,7 +929,7 @@ void Threads::create_vm_init_agents() { JvmtiExport::enter_onload_phase(); - for (agent = Arguments::agents(); agent != NULL; agent = agent->next()) { + for (agent = Arguments::agents(); agent != nullptr; agent = agent->next()) { // CDS dumping does not support native JVMTI agent. // CDS dumping supports Java agent if the AllowArchivingWithJavaAgent diagnostic option is specified. if (Arguments::is_dumping_archive()) { @@ -943,9 +943,9 @@ void Threads::create_vm_init_agents() { OnLoadEntry_t on_load_entry = lookup_agent_on_load(agent); - if (on_load_entry != NULL) { + if (on_load_entry != nullptr) { // Invoke the Agent_OnLoad function - jint err = (*on_load_entry)(&main_vm, agent->options(), NULL); + jint err = (*on_load_entry)(&main_vm, agent->options(), nullptr); if (err != JNI_OK) { vm_exit_during_initialization("agent library failed to init", agent->name()); } @@ -966,7 +966,7 @@ void Threads::shutdown_vm_agents() { const char *on_unload_symbols[] = AGENT_ONUNLOAD_SYMBOLS; size_t num_symbol_entries = ARRAY_SIZE(on_unload_symbols); extern struct JavaVM_ main_vm; - for (AgentLibrary* agent = Arguments::agents(); agent != NULL; agent = agent->next()) { + for (AgentLibrary* agent = Arguments::agents(); agent != nullptr; agent = agent->next()) { // Find the Agent_OnUnload function. 
Agent_OnUnload_t unload_entry = CAST_TO_FN_PTR(Agent_OnUnload_t, @@ -976,7 +976,7 @@ void Threads::shutdown_vm_agents() { num_symbol_entries)); // Invoke the Agent_OnUnload function - if (unload_entry != NULL) { + if (unload_entry != nullptr) { JavaThread* thread = JavaThread::current(); ThreadToNativeFromVM ttn(thread); HandleMark hm(thread); @@ -991,15 +991,15 @@ void Threads::create_vm_init_libraries() { extern struct JavaVM_ main_vm; AgentLibrary* agent; - for (agent = Arguments::libraries(); agent != NULL; agent = agent->next()) { + for (agent = Arguments::libraries(); agent != nullptr; agent = agent->next()) { OnLoadEntry_t on_load_entry = lookup_jvm_on_load(agent); - if (on_load_entry != NULL) { + if (on_load_entry != nullptr) { // Invoke the JVM_OnLoad function JavaThread* thread = JavaThread::current(); ThreadToNativeFromVM ttn(thread); HandleMark hm(thread); - jint err = (*on_load_entry)(&main_vm, agent->options(), NULL); + jint err = (*on_load_entry)(&main_vm, agent->options(), nullptr); if (err != JNI_OK) { vm_exit_during_initialization("-Xrun library failed to init", agent->name()); } @@ -1373,8 +1373,8 @@ GrowableArray<JavaThread*>* Threads::get_pending_threads(ThreadsList * t_list, JavaThread *Threads::owning_thread_from_monitor_owner(ThreadsList * t_list, address owner) { - // NULL owner means not locked so we can skip the search - if (owner == NULL) return NULL; + // null owner means not locked so we can skip the search + if (owner == nullptr) return nullptr; for (JavaThread* p : *t_list) { // first, see if owner is the address of a Java thread @@ -1384,13 +1384,13 @@ JavaThread *Threads::owning_thread_from_monitor_owner(ThreadsList * t_list, // Cannot assert on lack of success here since this function may be // used by code that is trying to report useful problem information // like deadlock detection. - if (UseHeavyMonitors) return NULL; + if (UseHeavyMonitors) return nullptr; // If we didn't find a matching Java thread and we didn't force use of // heavyweight monitors, then the owner is the stack address of the // Lock Word in the owning Java thread's stack. // - JavaThread* the_owner = NULL; + JavaThread* the_owner = nullptr; for (JavaThread* q : *t_list) { if (q->is_lock_owned(owner)) { the_owner = q; @@ -1416,7 +1416,7 @@ class PrintOnClosure : public ThreadClosure { _st(st) {} virtual void do_thread(Thread* thread) { - if (thread != NULL) { + if (thread != nullptr) { thread->print_on(_st); _st->cr(); } @@ -1479,7 +1479,7 @@ void Threads::print_on(outputStream* st, bool print_stacks, void Threads::print_on_error(Thread* this_thread, outputStream* st, Thread* current, char* buf, int buflen, bool* found_current) { - if (this_thread != NULL) { + if (this_thread != nullptr) { bool is_current = (current == this_thread); *found_current = *found_current || is_current; st->print("%s", is_current ? 
"=>" : " "); @@ -1528,7 +1528,7 @@ void Threads::print_on_error(outputStream* st, Thread* current, char* buf, print_on_error(WatcherThread::watcher_thread(), st, current, buf, buflen, &found_current); print_on_error(AsyncLogWriter::instance(), st, current, buf, buflen, &found_current); - if (Universe::heap() != NULL) { + if (Universe::heap() != nullptr) { PrintOnErrorClosure print_closure(st, current, buf, buflen, &found_current); Universe::heap()->gc_threads_do(&print_closure); } @@ -1555,15 +1555,15 @@ void Threads::print_threads_compiling(outputStream* st, char* buf, int buflen, b if (thread->is_Compiler_thread()) { CompilerThread* ct = (CompilerThread*) thread; - // Keep task in local variable for NULL check. - // ct->_task might be set to NULL by concurring compiler thread + // Keep task in local variable for null check. + // ct->_task might be set to null by concurring compiler thread // because it completed the compilation. The task is never freed, // though, just returned to a free list. CompileTask* task = ct->task(); - if (task != NULL) { + if (task != nullptr) { thread->print_name_on_error(st, buf, buflen); st->print(" "); - task->print(st, NULL, short_form, true); + task->print(st, nullptr, short_form, true); } } } @@ -1574,5 +1574,5 @@ void Threads::verify() { p->verify(); } VMThread* thread = VMThread::vm_thread(); - if (thread != NULL) thread->verify(); + if (thread != nullptr) thread->verify(); } diff --git a/src/hotspot/share/runtime/timerTrace.cpp b/src/hotspot/share/runtime/timerTrace.cpp index f38d8162e15ce..d2cdd28089764 100644 --- a/src/hotspot/share/runtime/timerTrace.cpp +++ b/src/hotspot/share/runtime/timerTrace.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,10 +31,10 @@ TraceTime::TraceTime(const char* title, _active = doit; _verbose = true; _title = title; - _print = NULL; + _print = nullptr; if (_active) { - _accum = NULL; + _accum = nullptr; _t.start(); } } @@ -46,7 +46,7 @@ TraceTime::TraceTime(const char* title, _active = doit; _verbose = verbose; _title = title; - _print = NULL; + _print = nullptr; if (_active) { _accum = accumulator; @@ -56,13 +56,13 @@ TraceTime::TraceTime(const char* title, TraceTime::TraceTime(const char* title, TraceTimerLogPrintFunc ttlpf) { - _active = ttlpf!= NULL; + _active = ttlpf!= nullptr; _verbose = true; _title = title; _print = ttlpf; if (_active) { - _accum = NULL; + _accum = nullptr; _t.start(); } } @@ -72,7 +72,7 @@ TraceTime::~TraceTime() { return; } _t.stop(); - if (_accum != NULL) { + if (_accum != nullptr) { _accum->add(_t); } if (!_verbose) { diff --git a/src/hotspot/share/runtime/timerTrace.hpp b/src/hotspot/share/runtime/timerTrace.hpp index 0c03b107269b9..8c7cdc8399bc4 100644 --- a/src/hotspot/share/runtime/timerTrace.hpp +++ b/src/hotspot/share/runtime/timerTrace.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -42,7 +42,7 @@ typedef void (*TraceTimerLogPrintFunc)(const char*, ...); // We need to explicit take address of LogImpl<>write<> and static cast // due to MSVC is not compliant with templates two-phase lookup #define TRACETIME_LOG(TT_LEVEL, ...) \ - log_is_enabled(TT_LEVEL, __VA_ARGS__) ? static_cast<TraceTimerLogPrintFunc>(&LogImpl<LOG_TAGS(__VA_ARGS__)>::write<LogLevel::TT_LEVEL>) : (TraceTimerLogPrintFunc)NULL + log_is_enabled(TT_LEVEL, __VA_ARGS__) ? static_cast<TraceTimerLogPrintFunc>(&LogImpl<LOG_TAGS(__VA_ARGS__)>::write<LogLevel::TT_LEVEL>) : (TraceTimerLogPrintFunc)nullptr class TraceTime: public StackObj { private: diff --git a/src/hotspot/share/runtime/unhandledOops.cpp b/src/hotspot/share/runtime/unhandledOops.cpp index 2fc9f67b8c3d3..11c8160fe03fc 100644 --- a/src/hotspot/share/runtime/unhandledOops.cpp +++ b/src/hotspot/share/runtime/unhandledOops.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -55,7 +55,7 @@ void UnhandledOops::dump_oops(UnhandledOops *list) { // For debugging unhandled oop detector _in the debugger_ // You don't want to turn it on in compiled code here. -static Thread* unhandled_oop_print = NULL; +static Thread* unhandled_oop_print = nullptr; void UnhandledOops::register_unhandled_oop(oop* op) { if (!_thread->is_in_live_stack((address)op)) { diff --git a/src/hotspot/share/runtime/unhandledOops.hpp b/src/hotspot/share/runtime/unhandledOops.hpp index f054d21a8b652..400e6dd554382 100644 --- a/src/hotspot/share/runtime/unhandledOops.hpp +++ b/src/hotspot/share/runtime/unhandledOops.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -55,7 +55,7 @@ class UnhandledOopEntry : public CHeapObj<mtThread> { bool _ok_for_gc; public: oop* oop_ptr() { return _oop_ptr; } - UnhandledOopEntry() : _oop_ptr(NULL), _ok_for_gc(false) {} + UnhandledOopEntry() : _oop_ptr(nullptr), _ok_for_gc(false) {} UnhandledOopEntry(oop* op) : _oop_ptr(op), _ok_for_gc(false) {} }; diff --git a/src/hotspot/share/runtime/vframe.cpp b/src/hotspot/share/runtime/vframe.cpp index 3d4ef87da73fc..5e69cd230ba7f 100644 --- a/src/hotspot/share/runtime/vframe.cpp +++ b/src/hotspot/share/runtime/vframe.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -58,7 +58,7 @@ vframe::vframe(const frame* fr, const RegisterMap* reg_map, JavaThread* thread) : _reg_map(reg_map), _thread(thread), _chunk(Thread::current(), reg_map->stack_chunk()()) { - assert(fr != NULL, "must have frame"); + assert(fr != nullptr, "must have frame"); _fr = *fr; } @@ -70,7 +70,7 @@ vframe* vframe::new_vframe(const frame* f, const RegisterMap* reg_map, JavaThrea // Compiled frame CodeBlob* cb = f->cb(); - if (cb != NULL) { + if (cb != nullptr) { if (cb->is_compiled()) { CompiledMethod* nm = (CompiledMethod*)cb; return new compiledVFrame(f, reg_map, thread, nm); @@ -96,10 +96,10 @@ vframe* vframe::new_vframe(const frame* f, const RegisterMap* reg_map, JavaThrea vframe* vframe::sender() const { RegisterMap temp_map = *register_map(); assert(is_top(), "just checking"); - if (_fr.is_empty()) return NULL; - if (_fr.is_entry_frame() && _fr.is_first_frame()) return NULL; + if (_fr.is_empty()) return nullptr; + if (_fr.is_entry_frame() && _fr.is_first_frame()) return nullptr; frame s = _fr.real_sender(&temp_map); - if (s.is_first_frame()) return NULL; + if (s.is_first_frame()) return nullptr; return vframe::new_vframe(&s, &temp_map, thread()); } @@ -109,13 +109,13 @@ bool vframe::is_vthread_entry() const { javaVFrame* vframe::java_sender() const { vframe* f = sender(); - while (f != NULL) { + while (f != nullptr) { if (f->is_vthread_entry()) break; if (f->is_java_frame() && !javaVFrame::cast(f)->method()->is_continuation_enter_intrinsic()) return javaVFrame::cast(f); f = f->sender(); } - return NULL; + return nullptr; } // ------------- javaVFrame -------------- @@ -133,18 +133,18 @@ GrowableArray<MonitorInfo*>* javaVFrame::locked_monitors() { // at a safepoint or the calling thread is operating on itself so // it cannot exit the ObjectMonitor so it remains busy. ObjectMonitor *waiting_monitor = thread()->current_waiting_monitor(); - ObjectMonitor *pending_monitor = NULL; - if (waiting_monitor == NULL) { + ObjectMonitor *pending_monitor = nullptr; + if (waiting_monitor == nullptr) { pending_monitor = thread()->current_pending_monitor(); } - oop pending_obj = (pending_monitor != NULL ? pending_monitor->object() : (oop) NULL); - oop waiting_obj = (waiting_monitor != NULL ? waiting_monitor->object() : (oop) NULL); + oop pending_obj = (pending_monitor != nullptr ? pending_monitor->object() : (oop) nullptr); + oop waiting_obj = (waiting_monitor != nullptr ? 
waiting_monitor->object() : (oop) nullptr); for (int index = (mons->length()-1); index >= 0; index--) { MonitorInfo* monitor = mons->at(index); if (monitor->eliminated() && is_compiled_frame()) continue; // skip eliminated monitor oop obj = monitor->owner(); - if (obj == NULL) continue; // skip unowned monitor + if (obj == nullptr) continue; // skip unowned monitor // // Skip the monitor that the thread is blocked to enter or waiting on // @@ -200,7 +200,7 @@ void javaVFrame::print_lock_info_on(outputStream* st, int frame_count) { } else { st->print_cr("\t- %s ", wait_state); } - } else if (thread()->current_park_blocker() != NULL) { + } else if (thread()->current_park_blocker() != nullptr) { oop obj = thread()->current_park_blocker(); Klass* k = obj->klass(); st->print_cr("\t- %s <" INTPTR_FORMAT "> (a %s)", "parking to wait for ", p2i(obj), k->external_name()); @@ -208,7 +208,7 @@ void javaVFrame::print_lock_info_on(outputStream* st, int frame_count) { else if (thread()->osthread()->get_state() == CONDVAR_WAIT) { // We are waiting on the native class initialization monitor. InstanceKlass* k = thread()->class_to_be_initialized(); - if (k != NULL) { + if (k != nullptr) { st->print_cr("\t- waiting on the Class initialization monitor for %s", k->external_name()); } } @@ -227,13 +227,13 @@ void javaVFrame::print_lock_info_on(outputStream* st, int frame_count) { st->print_cr("\t- eliminated (a %s)", k->external_name()); } else { Handle obj(current, monitor->owner()); - if (obj() != NULL) { + if (obj() != nullptr) { print_locked_object_class_name(st, obj, "eliminated"); } } continue; } - if (monitor->owner() != NULL) { + if (monitor->owner() != nullptr) { // the monitor is associated with an object, i.e., it is locked const char *lock_state = "locked"; // assume we have the monitor locked @@ -266,18 +266,18 @@ void javaVFrame::print_lock_info_on(outputStream* st, int frame_count) { // ------------- interpretedVFrame -------------- u_char* interpretedVFrame::bcp() const { - return stack_chunk() == NULL ? fr().interpreter_frame_bcp() : stack_chunk()->interpreter_frame_bcp(fr()); + return stack_chunk() == nullptr ? fr().interpreter_frame_bcp() : stack_chunk()->interpreter_frame_bcp(fr()); } intptr_t* interpretedVFrame::locals_addr_at(int offset) const { - assert(stack_chunk() == NULL, "Not supported for heap frames"); // unsupported for now because seems to be unused + assert(stack_chunk() == nullptr, "Not supported for heap frames"); // unsupported for now because seems to be unused assert(fr().is_interpreted_frame(), "frame should be an interpreted frame"); return fr().interpreter_frame_local_at(offset); } GrowableArray<MonitorInfo*>* interpretedVFrame::monitors() const { GrowableArray<MonitorInfo*>* result = new GrowableArray<MonitorInfo*>(5); - if (stack_chunk() == NULL) { // no monitors in continuations + if (stack_chunk() == nullptr) { // no monitors in continuations for (BasicObjectLock* current = (fr().previous_monitor_in_interpreter_frame(fr().interpreter_frame_monitor_begin())); current >= fr().interpreter_frame_monitor_end(); current = fr().previous_monitor_in_interpreter_frame(current)) { @@ -292,7 +292,7 @@ int interpretedVFrame::bci() const { } Method* interpretedVFrame::method() const { - return stack_chunk() == NULL ? 
fr().interpreter_frame_method() : stack_chunk()->interpreter_frame_method(fr()); } static StackValue* create_stack_value_from_oop_map(const InterpreterOopMap& oop_mask, @@ -307,11 +307,11 @@ static StackValue* create_stack_value_from_oop_map(const InterpreterOopMap& oop_ return StackValue::create_stack_value_from_oop_location(chunk, (void*)addr); } // value (integer) "v" - return new StackValue(addr != NULL ? *addr : 0); + return new StackValue(addr != nullptr ? *addr : 0); } static bool is_in_expression_stack(const frame& fr, const intptr_t* const addr) { - assert(addr != NULL, "invariant"); + assert(addr != nullptr, "invariant"); // Ensure to be 'inside' the expression stack (i.e., addr >= sp for Intel). // In case of exceptions, the expression stack is invalid and the sp @@ -329,20 +329,20 @@ static void stack_locals(StackValueCollection* result, const frame& fr, const stackChunkOop chunk) { - assert(result != NULL, "invariant"); + assert(result != nullptr, "invariant"); for (int i = 0; i < length; ++i) { const intptr_t* addr; - if (chunk == NULL) { + if (chunk == nullptr) { addr = fr.interpreter_frame_local_at(i); assert(addr >= fr.sp(), "must be inside the frame"); } else { addr = chunk->interpreter_frame_local_at(fr, i); } - assert(addr != NULL, "invariant"); + assert(addr != nullptr, "invariant"); StackValue* const sv = create_stack_value_from_oop_map(oop_mask, i, addr, chunk); - assert(sv != NULL, "sanity check"); + assert(sv != nullptr, "sanity check"); result->add(sv); } @@ -355,16 +355,16 @@ static void stack_expressions(StackValueCollection* result, const frame& fr, const stackChunkOop chunk) { - assert(result != NULL, "invariant"); + assert(result != nullptr, "invariant"); for (int i = 0; i < length; ++i) { const intptr_t* addr; - if (chunk == NULL) { + if (chunk == nullptr) { addr = fr.interpreter_frame_expression_stack_at(i); - assert(addr != NULL, "invariant"); + assert(addr != nullptr, "invariant"); if (!is_in_expression_stack(fr, addr)) { // Need to ensure no bogus escapes. - addr = NULL; + addr = nullptr; } } else { addr = chunk->interpreter_frame_expression_stack_at(fr, i); @@ -374,7 +374,7 @@ static void stack_expressions(StackValueCollection* result, i + max_locals, addr, chunk); - assert(sv != NULL, "sanity check"); + assert(sv != nullptr, "sanity check"); result->add(sv); } @@ -433,7 +433,7 @@ StackValueCollection* interpretedVFrame::stack_data(bool expressions) const { } void interpretedVFrame::set_locals(StackValueCollection* values) const { - if (values == NULL || values->size() == 0) return; + if (values == nullptr || values->size() == 0) return; // If the method is native, max_locals is not telling the truth. // maxlocals then equals the size of parameters @@ -449,7 +449,7 @@ void interpretedVFrame::set_locals(StackValueCollection* values) const { // Depending on oop/int put it in the right package const StackValue* const sv = values->at(i); - assert(sv != NULL, "sanity check"); + assert(sv != nullptr, "sanity check"); if (sv->type() == T_OBJECT) { *(oop *) addr = (sv->get_obj())(); } else { // integer @@ -593,7 +593,7 @@ void vframeStreamCommon::skip_prefixed_method_and_wrappers() { } javaVFrame* vframeStreamCommon::asJavaVFrame() { - javaVFrame* result = NULL; + javaVFrame* result = nullptr; // FIXME, need to re-do JDK-8271140 and check is_native_frame? 
if (_mode == compiled_mode && _frame.is_compiled_frame()) { assert(_frame.is_compiled_frame() || _frame.is_native_frame(), "expected compiled Java frame"); @@ -619,7 +619,7 @@ javaVFrame* vframeStreamCommon::asJavaVFrame() { #ifndef PRODUCT void vframe::print() { - if (WizardMode) _fr.print_value_on(tty,NULL); + if (WizardMode) _fr.print_value_on(tty,nullptr); } void vframe::print_value() const { @@ -670,7 +670,7 @@ void javaVFrame::print() { if (monitor->owner_is_scalar_replaced()) { Klass* k = java_lang_Class::as_Klass(monitor->owner_klass()); tty->print("( is scalar replaced %s)", k->external_name()); - } else if (monitor->owner() == NULL) { + } else if (monitor->owner() == nullptr) { tty->print("( null )"); } else { monitor->owner()->print_value(); @@ -701,14 +701,14 @@ void javaVFrame::print_value() const { if (!m->is_native()) { Symbol* source_name = k->source_file_name(); int line_number = m->line_number_from_bci(bci()); - if (source_name != NULL && (line_number != -1)) { + if (source_name != nullptr && (line_number != -1)) { tty->print("(%s:%d)", source_name->as_C_string(), line_number); } } else { tty->print("(Native Method)"); } // Check frame size and print warning if it looks suspiciously large - if (fr().sp() != NULL) { + if (fr().sp() != nullptr) { RegisterMap map = *register_map(); uint size = fr().frame_size(); #ifdef _LP64 @@ -734,7 +734,7 @@ void javaVFrame::print_activation(int index) const { // ------------- externalVFrame -------------- void externalVFrame::print() { - _fr.print_value_on(tty,NULL); + _fr.print_value_on(tty,nullptr); } void externalVFrame::print_value() const { diff --git a/src/hotspot/share/runtime/vframe.hpp b/src/hotspot/share/runtime/vframe.hpp index 988f62693a8d3..2f2ab48ac433a 100644 --- a/src/hotspot/share/runtime/vframe.hpp +++ b/src/hotspot/share/runtime/vframe.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -130,7 +130,7 @@ class javaVFrame: public vframe { public: // casting static javaVFrame* cast(vframe* vf) { - assert(vf == NULL || vf->is_java_frame(), "must be java frame"); + assert(vf == nullptr || vf->is_java_frame(), "must be java frame"); return (javaVFrame*) vf; } @@ -175,7 +175,7 @@ class interpretedVFrame: public javaVFrame { // casting static interpretedVFrame* cast(vframe* vf) { - assert(vf == NULL || vf->is_interpreted_frame(), "must be interpreted frame"); + assert(vf == nullptr || vf->is_interpreted_frame(), "must be interpreted frame"); return (interpretedVFrame*) vf; } @@ -294,7 +294,7 @@ class vframeStreamCommon : StackObj { CodeBlob* cb() const { return _frame.cb(); } CompiledMethod* nm() const { - assert( cb() != NULL && cb()->is_compiled(), "usage"); + assert( cb() != nullptr && cb()->is_compiled(), "usage"); return (CompiledMethod*) cb(); } diff --git a/src/hotspot/share/runtime/vframe.inline.hpp b/src/hotspot/share/runtime/vframe.inline.hpp index c6c1297626876..ae66ea98003fd 100644 --- a/src/hotspot/share/runtime/vframe.inline.hpp +++ b/src/hotspot/share/runtime/vframe.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -34,17 +34,17 @@ #include "runtime/handles.inline.hpp" #include "runtime/javaThread.inline.hpp" -inline vframeStreamCommon::vframeStreamCommon(RegisterMap reg_map) : _reg_map(reg_map), _cont_entry(NULL) { +inline vframeStreamCommon::vframeStreamCommon(RegisterMap reg_map) : _reg_map(reg_map), _cont_entry(nullptr) { _thread = _reg_map.thread(); } inline oop vframeStreamCommon::continuation() const { - if (_reg_map.cont() != NULL) { + if (_reg_map.cont() != nullptr) { return _reg_map.cont(); - } else if (_cont_entry != NULL) { + } else if (_cont_entry != nullptr) { return _cont_entry->cont_oop(_reg_map.thread()); } else { - return NULL; + return nullptr; } } @@ -79,9 +79,9 @@ inline void vframeStreamCommon::next() { bool is_enterSpecial_frame = false; if (Continuation::is_continuation_enterSpecial(_frame)) { assert(!_reg_map.in_cont(), ""); - assert(_cont_entry != NULL, ""); + assert(_cont_entry != nullptr, ""); // Reading oops are only safe if process_frames() is true, and we fix the oops. - assert(!_reg_map.process_frames() || _cont_entry->cont_oop(_reg_map.thread()) != NULL, "_cont: " INTPTR_FORMAT, p2i(_cont_entry)); + assert(!_reg_map.process_frames() || _cont_entry->cont_oop(_reg_map.thread()) != nullptr, "_cont: " INTPTR_FORMAT, p2i(_cont_entry)); is_enterSpecial_frame = true; // TODO: handle ShowCarrierFrames @@ -91,7 +91,7 @@ inline void vframeStreamCommon::next() { break; } } else if (_reg_map.in_cont() && Continuation::is_continuation_entry_frame(_frame, &_reg_map)) { - assert(_reg_map.cont() != NULL, ""); + assert(_reg_map.cont() != nullptr, ""); oop scope = jdk_internal_vm_Continuation::scope(_reg_map.cont()); if (scope == java_lang_VirtualThread::vthread_scope() || (_continuation_scope.not_null() && scope == _continuation_scope())) { @@ -204,15 +204,15 @@ inline bool vframeStreamCommon::fill_from_frame() { // Compiled frame - if (cb() != NULL && cb()->is_compiled()) { - assert(nm()->method() != NULL, "must be"); + if (cb() != nullptr && cb()->is_compiled()) { + assert(nm()->method() != nullptr, "must be"); if (nm()->is_native_method()) { // Do not rely on scopeDesc since the pc might be imprecise due to the _last_native_pc trick. fill_from_compiled_native_frame(); } else { PcDesc* pc_desc = nm()->pc_desc_at(_frame.pc()); int decode_offset; - if (pc_desc == NULL) { + if (pc_desc == nullptr) { // Should not happen, but let fill_from_compiled_frame handle it. // If we are trying to walk the stack of a thread that is not @@ -229,7 +229,7 @@ inline bool vframeStreamCommon::fill_from_frame() { // fill_from_compiled_frame handle it. - JavaThreadState state = _thread != NULL ? _thread->thread_state() : _thread_in_Java; + JavaThreadState state = _thread != nullptr ? _thread->thread_state() : _thread_in_Java; // in_Java should be good enough to test safepoint safety // if state were say in_Java_trans then we'd expect that diff --git a/src/hotspot/share/runtime/vframeArray.cpp b/src/hotspot/share/runtime/vframeArray.cpp index 5521b7a15523e..27391919b3c93 100644 --- a/src/hotspot/share/runtime/vframeArray.cpp +++ b/src/hotspot/share/runtime/vframeArray.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -49,9 +49,9 @@ int vframeArrayElement:: bci(void) const { return (_bci == SynchronizationEntryBCI ? 0 : _bci); } void vframeArrayElement::free_monitors(JavaThread* jt) { - if (_monitors != NULL) { + if (_monitors != nullptr) { MonitorChunk* chunk = _monitors; - _monitors = NULL; + _monitors = nullptr; jt->remove_monitor_chunk(chunk); delete chunk; } @@ -80,7 +80,7 @@ void vframeArrayElement::fill_in(compiledVFrame* vf, bool realloc_failures) { GrowableArray<MonitorInfo*>* list = vf->monitors(); if (list->is_empty()) { - _monitors = NULL; + _monitors = nullptr; } else { // Allocate monitor chunk @@ -93,9 +93,9 @@ void vframeArrayElement::fill_in(compiledVFrame* vf, bool realloc_failures) { assert(!monitor->owner_is_scalar_replaced() || realloc_failures, "object should be reallocated already"); BasicObjectLock* dest = _monitors->at(index); if (monitor->owner_is_scalar_replaced()) { - dest->set_obj(NULL); + dest->set_obj(nullptr); } else { - assert(monitor->owner() == NULL || !monitor->owner()->is_unlocked(), "object must be null or locked"); + assert(monitor->owner() == nullptr || !monitor->owner()->is_unlocked(), "object must be null or locked"); dest->set_obj(monitor->owner()); monitor->lock()->move_to(monitor->owner(), dest->lock()); } @@ -218,7 +218,7 @@ void vframeArrayElement::unpack_on_stack(int caller_actual_parameters, // For realloc failure exception we just pop frames, skip the guarantee. assert(*bcp != Bytecodes::_monitorenter || is_top_frame, "a _monitorenter must be a top frame"); - assert(thread->deopt_compiled_method() != NULL, "compiled method should be known"); + assert(thread->deopt_compiled_method() != nullptr, "compiled method should be known"); guarantee(realloc_failure_exception || !(thread->deopt_compiled_method()->is_compiled_by_c2() && *bcp == Bytecodes::_monitorenter && exec_mode == Deoptimization::Unpack_exception), @@ -242,14 +242,14 @@ void vframeArrayElement::unpack_on_stack(int caller_actual_parameters, // Deoptimization::fetch_unroll_info_helper popframe_preserved_args_size_in_words = in_words(thread->popframe_preserved_args_size_in_words()); } - } else if (!realloc_failure_exception && JvmtiExport::can_force_early_return() && state != NULL && + } else if (!realloc_failure_exception && JvmtiExport::can_force_early_return() && state != nullptr && state->is_earlyret_pending()) { // Force early return from top frame after deoptimization pc = Interpreter::remove_activation_early_entry(state->earlyret_tos()); } else { - if (realloc_failure_exception && JvmtiExport::can_force_early_return() && state != NULL && state->is_earlyret_pending()) { + if (realloc_failure_exception && JvmtiExport::can_force_early_return() && state != nullptr && state->is_earlyret_pending()) { state->clr_earlyret_pending(); - state->set_earlyret_oop(NULL); + state->set_earlyret_oop(nullptr); state->clr_earlyret_value(); } // Possibly override the previous pc computation of the top (youngest) frame @@ -278,10 +278,10 @@ void vframeArrayElement::unpack_on_stack(int caller_actual_parameters, // Setup the interpreter frame - assert(method() != NULL, "method must exist"); + assert(method() != nullptr, "method must exist"); int temps = expressions()->size(); - int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors(); + int locks = monitors() == nullptr ?
0 : monitors()->number_of_monitors(); Interpreter::layout_activation(method(), temps + callee_parameters, @@ -316,7 +316,7 @@ void vframeArrayElement::unpack_on_stack(int caller_actual_parameters, iframe()->interpreter_frame_set_bcp(bcp); if (ProfileInterpreter) { MethodData* mdo = method()->method_data(); - if (mdo != NULL) { + if (mdo != nullptr) { int bci = iframe()->interpreter_frame_bci(); if (use_next_mdp) ++bci; address mdp = mdo->bci_to_dp(bci); @@ -355,8 +355,8 @@ void vframeArrayElement::unpack_on_stack(int caller_actual_parameters, if (PrintDeoptimizationDetails) { tty->print(" - Reconstructed expression %d (OBJECT): ", i); oop o = cast_to_oop((address)(*addr)); - if (o == NULL) { - tty->print_cr("NULL"); + if (o == nullptr) { + tty->print_cr("null"); } else { ResourceMark rm; tty->print_raw_cr(o->klass()->name()->as_C_string()); @@ -399,8 +399,8 @@ void vframeArrayElement::unpack_on_stack(int caller_actual_parameters, if (PrintDeoptimizationDetails) { tty->print(" - Reconstructed local %d (OBJECT): ", i); oop o = cast_to_oop((address)(*addr)); - if (o == NULL) { - tty->print_cr("NULL"); + if (o == nullptr) { + tty->print_cr("null"); } else { ResourceMark rm; tty->print_raw_cr(o->klass()->name()->as_C_string()); @@ -409,7 +409,7 @@ void vframeArrayElement::unpack_on_stack(int caller_actual_parameters, #endif // !PRODUCT break; case T_CONFLICT: - // A dead location. If it is an oop then we need a NULL to prevent GC from following it + // A dead location. If it is an oop then we need a null to prevent GC from following it *addr = NULL_WORD; break; default: @@ -427,7 +427,7 @@ void vframeArrayElement::unpack_on_stack(int caller_actual_parameters, // addresses. if (popframe_preserved_args_size_in_words != 0) { void* saved_args = thread->popframe_preserved_args(); - assert(saved_args != NULL, "must have been saved by interpreter"); + assert(saved_args != nullptr, "must have been saved by interpreter"); #ifdef ASSERT assert(popframe_preserved_args_size_in_words <= iframe()->interpreter_frame_expression_stack_size()*Interpreter::stackElementWords, @@ -467,7 +467,7 @@ void vframeArrayElement::unpack_on_stack(int caller_actual_parameters, // a dangling pointer in the vframeArray we leave around for debug // purposes - _locals = _expressions = NULL; + _locals = _expressions = nullptr; } @@ -476,7 +476,7 @@ int vframeArrayElement::on_stack_size(int callee_parameters, bool is_top_frame, int popframe_extra_stack_expression_els) const { assert(method()->max_locals() == locals()->size(), "just checking"); - int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors(); + int locks = monitors() == nullptr ? 
0 : monitors()->number_of_monitors(); int temps = expressions()->size(); return Interpreter::size_activation(method()->max_stack(), temps + callee_parameters, @@ -506,7 +506,7 @@ vframeArray* vframeArray::allocate(JavaThread* thread, int frame_size, GrowableA result->_sender = sender; result->_caller = caller; result->_original = self; - result->set_unroll_block(NULL); // initialize it + result->set_unroll_block(nullptr); // initialize it result->fill_in(thread, frame_size, chunk, reg_map, realloc_failures); return result; } @@ -524,7 +524,7 @@ void vframeArray::fill_in(JavaThread* thread, } // Copy registers for callee-saved registers - if (reg_map != NULL) { + if (reg_map != nullptr) { for(int i = 0; i < RegisterMap::reg_count; i++) { #ifdef AMD64 // The register map has one entry for every int (32-bit value), so @@ -538,16 +538,16 @@ void vframeArray::fill_in(JavaThread* thread, // in amd64.ad. // if (VMReg::Name(i) < SharedInfo::stack0 && is_even(i)) { intptr_t* src = (intptr_t*) reg_map->location(VMRegImpl::as_VMReg(i), _caller.sp()); - _callee_registers[i] = src != NULL ? *src : NULL_WORD; + _callee_registers[i] = src != nullptr ? *src : NULL_WORD; // } else { // jint* src = (jint*) reg_map->location(VMReg::Name(i)); - // _callee_registers[i] = src != NULL ? *src : NULL_WORD; + // _callee_registers[i] = src != nullptr ? *src : NULL_WORD; // } #else jint* src = (jint*) reg_map->location(VMRegImpl::as_VMReg(i), _caller.sp()); - _callee_registers[i] = src != NULL ? *src : NULL_WORD; + _callee_registers[i] = src != nullptr ? *src : NULL_WORD; #endif - if (src != NULL) { + if (src != nullptr) { jint* dst = (jint*) register_location(i); *dst = *src; } diff --git a/src/hotspot/share/runtime/vframe_hp.cpp b/src/hotspot/share/runtime/vframe_hp.cpp index 54d0fd97b238c..b508b09104776 100644 --- a/src/hotspot/share/runtime/vframe_hp.cpp +++ b/src/hotspot/share/runtime/vframe_hp.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -54,9 +54,9 @@ StackValueCollection* compiledVFrame::locals() const { // Natives has no scope - if (scope() == NULL) return new StackValueCollection(0); + if (scope() == nullptr) return new StackValueCollection(0); GrowableArray<ScopeValue*>* scv_list = scope()->locals(); - if (scv_list == NULL) return new StackValueCollection(0); + if (scv_list == nullptr) return new StackValueCollection(0); // scv_list is the list of ScopeValues describing the JVM stack state. // There is one scv_list entry for every JVM stack state in use. @@ -70,7 +70,7 @@ StackValueCollection* compiledVFrame::locals() const { // performed through compiledVFrame::update_locals.
if (!register_map()->in_cont()) { // LOOM TODO GrowableArray<jvmtiDeferredLocalVariableSet*>* list = JvmtiDeferredUpdates::deferred_locals(thread()); - if (list != NULL ) { + if (list != nullptr ) { // In real life this never happens or is typically a single element search for (int i = 0; i < list->length(); i++) { if (list->at(i)->matches(this)) { @@ -111,8 +111,8 @@ void compiledVFrame::update_deferred_value(BasicType type, int index, jvalue val assert(fr().is_deoptimized_frame(), "frame must be scheduled for deoptimization"); assert(!Continuation::is_frame_in_continuation(thread(), fr()), "No support for deferred values in continuations"); GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred = JvmtiDeferredUpdates::deferred_locals(thread()); - jvmtiDeferredLocalVariableSet* locals = NULL; - if (deferred != NULL ) { + jvmtiDeferredLocalVariableSet* locals = nullptr; + if (deferred != nullptr ) { // See if this vframe has already had locals with deferred writes for (int f = 0; f < deferred->length(); f++ ) { if (deferred->at(f)->matches(this)) { @@ -127,7 +127,7 @@ void compiledVFrame::update_deferred_value(BasicType type, int index, jvalue val JvmtiDeferredUpdates::create_for(thread()); deferred = JvmtiDeferredUpdates::deferred_locals(thread()); } - if (locals == NULL) { + if (locals == nullptr) { locals = new jvmtiDeferredLocalVariableSet(method(), bci(), fr().id(), vframe_id()); deferred->push(locals); assert(locals->id() == fr().id(), "Huh? Must match"); @@ -147,7 +147,7 @@ void compiledVFrame::create_deferred_updates_after_object_deoptimization() { // locals GrowableArray<ScopeValue*>* scopedValues = scope()->locals(); StackValueCollection* lcls = locals(); - if (lcls != NULL) { + if (lcls != nullptr) { for (int i2 = 0; i2 < lcls->size(); i2++) { StackValue* var = lcls->at(i2); if (var->type() == T_OBJECT && scopedValues->at(i2)->is_object()) { @@ -161,7 +161,7 @@ void compiledVFrame::create_deferred_updates_after_object_deoptimization() { // expressions GrowableArray<ScopeValue*>* scopeExpressions = scope()->expressions(); StackValueCollection* exprs = expressions(); - if (exprs != NULL) { + if (exprs != nullptr) { for (int i2 = 0; i2 < exprs->size(); i2++) { StackValue* var = exprs->at(i2); if (var->type() == T_OBJECT && scopeExpressions->at(i2)->is_object()) { @@ -174,7 +174,7 @@ void compiledVFrame::create_deferred_updates_after_object_deoptimization() { // monitors GrowableArray<MonitorInfo*>* mtrs = monitors(); - if (mtrs != NULL) { + if (mtrs != nullptr) { for (int i2 = 0; i2 < mtrs->length(); i2++) { if (mtrs->at(i2)->eliminated()) { assert(!mtrs->at(i2)->owner_is_scalar_replaced(), @@ -187,9 +187,9 @@ void compiledVFrame::create_deferred_updates_after_object_deoptimization() { StackValueCollection* compiledVFrame::expressions() const { // Natives has no scope - if (scope() == NULL) return new StackValueCollection(0); + if (scope() == nullptr) return new StackValueCollection(0); GrowableArray<ScopeValue*>* scv_list = scope()->expressions(); - if (scv_list == NULL) return new StackValueCollection(0); + if (scv_list == nullptr) return new StackValueCollection(0); // scv_list is the list of ScopeValues describing the JVM stack state. // There is one scv_list entry for every JVM stack state in use. @@ -203,7 +203,7 @@ StackValueCollection* compiledVFrame::expressions() const { // Replace the original values with any stores that have been // performed through compiledVFrame::update_stack.
GrowableArray<jvmtiDeferredLocalVariableSet*>* list = JvmtiDeferredUpdates::deferred_locals(thread()); - if (list != NULL ) { + if (list != nullptr ) { // In real life this never happens or is typically a single element search for (int i = 0; i < list->length(); i++) { if (list->at(i)->matches(this)) { @@ -241,7 +241,7 @@ BasicLock* compiledVFrame::resolve_monitor_lock(Location location) const { GrowableArray<MonitorInfo*>* compiledVFrame::monitors() const { // Natives has no scope - if (scope() == NULL) { + if (scope() == nullptr) { CompiledMethod* nm = code(); Method* method = nm->method(); assert(method->is_native(), "Expect a native method"); @@ -259,7 +259,7 @@ GrowableArray<MonitorInfo*>* compiledVFrame::monitors() const { return monitors; } GrowableArray<MonitorValue*>* monitors = scope()->monitors(); - if (monitors == NULL) { + if (monitors == nullptr) { return new GrowableArray<MonitorInfo*>(0); } GrowableArray<MonitorInfo*>* result = new GrowableArray<MonitorInfo*>(monitors->length()); @@ -285,7 +285,7 @@ GrowableArray<MonitorInfo*>* compiledVFrame::monitors() const { // Replace the original values with any stores that have been // performed through compiledVFrame::update_monitors. GrowableArrayView<jvmtiDeferredLocalVariableSet*>* list = JvmtiDeferredUpdates::deferred_locals(thread()); - if (list != NULL ) { + if (list != nullptr ) { // In real life this never happens or is typically a single element search for (int i = 0; i < list->length(); i++) { if (list->at(i)->matches(this)) { @@ -301,7 +301,7 @@ GrowableArray<MonitorInfo*>* compiledVFrame::monitors() const { compiledVFrame::compiledVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread, CompiledMethod* nm) : javaVFrame(fr, reg_map, thread) { - _scope = NULL; + _scope = nullptr; _vframe_id = 0; // Compiled method (native stub or Java code) // native wrappers have no scope data, it is implied @@ -314,7 +314,7 @@ compiledVFrame::compiledVFrame(const frame* fr, const RegisterMap* reg_map, Java : javaVFrame(fr, reg_map, thread) { _scope = scope; _vframe_id = vframe_id; - guarantee(_scope != NULL, "scope must be present"); + guarantee(_scope != nullptr, "scope must be present"); } compiledVFrame* compiledVFrame::at_scope(int decode_offset, int vframe_id) { @@ -328,7 +328,7 @@ compiledVFrame* compiledVFrame::at_scope(int decode_offset, int vframe_id) { bool compiledVFrame::is_top() const { // FIX IT: Remove this when new native stubs are in place - if (scope() == NULL) return true; + if (scope() == nullptr) return true; return scope()->is_top(); } @@ -339,7 +339,7 @@ CompiledMethod* compiledVFrame::code() const { Method* compiledVFrame::method() const { - if (scope() == NULL) { + if (scope() == nullptr) { // native nmethods have no scope the method is implied nmethod* nm = code()->as_nmethod(); assert(nm->is_native_method(), "must be native"); @@ -356,7 +356,7 @@ int compiledVFrame::bci() const { int compiledVFrame::raw_bci() const { - if (scope() == NULL) { + if (scope() == nullptr) { // native nmethods have no scope the method/bci is implied nmethod* nm = code()->as_nmethod(); assert(nm->is_native_method(), "must be native"); @@ -366,7 +366,7 @@ int compiledVFrame::raw_bci() const { } bool compiledVFrame::should_reexecute() const { - if (scope() == NULL) { + if (scope() == nullptr) { // native nmethods have no scope the method/bci is implied nmethod* nm = code()->as_nmethod(); assert(nm->is_native_method(), "must be native"); @@ -376,16 +376,16 @@ bool compiledVFrame::should_reexecute() const { } bool compiledVFrame::has_ea_local_in_scope() const { - if (scope() == NULL) { + if (scope() == nullptr) { // native nmethod, all objs escape assert(code()->as_nmethod()->is_native_method(),
"must be native"); return false; } - return (scope()->objects() != NULL) || scope()->has_ea_local_in_scope(); + return (scope()->objects() != nullptr) || scope()->has_ea_local_in_scope(); } bool compiledVFrame::arg_escape() const { - if (scope() == NULL) { + if (scope() == nullptr) { // native nmethod, all objs escape assert(code()->as_nmethod()->is_native_method(), "must be native"); return false; @@ -395,7 +395,7 @@ bool compiledVFrame::arg_escape() const { vframe* compiledVFrame::sender() const { const frame f = fr(); - if (scope() == NULL) { + if (scope() == nullptr) { // native nmethods have no scope the method/bci is implied nmethod* nm = code()->as_nmethod(); assert(nm->is_native_method(), "must be native"); diff --git a/src/hotspot/share/runtime/vframe_hp.hpp b/src/hotspot/share/runtime/vframe_hp.hpp index 3bee308b1ed07..23818e415446a 100644 --- a/src/hotspot/share/runtime/vframe_hp.hpp +++ b/src/hotspot/share/runtime/vframe_hp.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -52,7 +52,7 @@ class compiledVFrame: public javaVFrame { // Casting static compiledVFrame* cast(vframe* vf) { - assert(vf == NULL || vf->is_compiled_frame(), "must be compiled frame"); + assert(vf == nullptr || vf->is_compiled_frame(), "must be compiled frame"); return (compiledVFrame*) vf; } diff --git a/src/hotspot/share/runtime/vmOperation.hpp b/src/hotspot/share/runtime/vmOperation.hpp index 774263ccded16..bd81b9548046a 100644 --- a/src/hotspot/share/runtime/vmOperation.hpp +++ b/src/hotspot/share/runtime/vmOperation.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -124,7 +124,7 @@ class VM_Operation : public StackObj { static const char* _names[]; public: - VM_Operation() : _calling_thread(NULL) {} + VM_Operation() : _calling_thread(nullptr) {} // VM operation support (used by VM thread) Thread* calling_thread() const { return _calling_thread; } diff --git a/src/hotspot/share/runtime/vmOperations.cpp b/src/hotspot/share/runtime/vmOperations.cpp index 6e2e70b825d28..a04ba757500ec 100644 --- a/src/hotspot/share/runtime/vmOperations.cpp +++ b/src/hotspot/share/runtime/vmOperations.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -185,9 +185,9 @@ void VM_PrintMetadata::doit() { } VM_FindDeadlocks::~VM_FindDeadlocks() { - if (_deadlocks != NULL) { + if (_deadlocks != nullptr) { DeadlockCycle* cycle = _deadlocks; - while (cycle != NULL) { + while (cycle != nullptr) { DeadlockCycle* d = cycle; cycle = cycle->next(); delete d; @@ -204,9 +204,9 @@ void VM_FindDeadlocks::doit() { _setter.set(); _deadlocks = ThreadService::find_deadlocks_at_safepoint(_setter.list(), _concurrent_locks); - if (_out != NULL) { + if (_out != nullptr) { int num_deadlocks = 0; - for (DeadlockCycle* cycle = _deadlocks; cycle != NULL; cycle = cycle->next()) { + for (DeadlockCycle* cycle = _deadlocks; cycle != nullptr; cycle = cycle->next()) { num_deadlocks++; cycle->print_on_with(_setter.list(), _out); } @@ -227,7 +227,7 @@ VM_ThreadDump::VM_ThreadDump(ThreadDumpResult* result, bool with_locked_synchronizers) { _result = result; _num_threads = 0; // 0 indicates all threads - _threads = NULL; + _threads = nullptr; _result = result; _max_depth = max_depth; _with_locked_monitors = with_locked_monitors; @@ -301,7 +301,7 @@ void VM_ThreadDump::doit() { // skip terminating threads and hidden threads continue; } - ThreadConcurrentLocks* tcl = NULL; + ThreadConcurrentLocks* tcl = nullptr; if (_with_locked_synchronizers) { tcl = concurrent_locks.thread_concurrent_locks(jt); } @@ -313,7 +313,7 @@ void VM_ThreadDump::doit() { for (int i = 0; i < _num_threads; i++) { instanceHandle th = _threads->at(i); - if (th() == NULL) { + if (th() == nullptr) { // skip if the thread doesn't exist // Add a dummy snapshot _result->add_thread_snapshot(); @@ -323,20 +323,20 @@ void VM_ThreadDump::doit() { // Dump thread stack only if the thread is alive and not exiting // and not VM internal thread. JavaThread* jt = java_lang_Thread::thread(th()); - if (jt != NULL && !_result->t_list()->includes(jt)) { + if (jt != nullptr && !_result->t_list()->includes(jt)) { // _threads[i] doesn't refer to a valid JavaThread; this check // is primarily for JVM_DumpThreads() which doesn't have a good // way to validate the _threads array. - jt = NULL; + jt = nullptr; } - if (jt == NULL || /* thread not alive */ + if (jt == nullptr || /* thread not alive */ jt->is_exiting() || jt->is_hidden_from_external_view()) { - // add a NULL snapshot if skipped + // add a nullptr snapshot if skipped _result->add_thread_snapshot(); continue; } - ThreadConcurrentLocks* tcl = NULL; + ThreadConcurrentLocks* tcl = nullptr; if (_with_locked_synchronizers) { tcl = concurrent_locks.thread_concurrent_locks(jt); } @@ -353,7 +353,7 @@ void VM_ThreadDump::snapshot_thread(JavaThread* java_thread, ThreadConcurrentLoc } volatile bool VM_Exit::_vm_exited = false; -Thread * volatile VM_Exit::_shutdown_thread = NULL; +Thread * volatile VM_Exit::_shutdown_thread = nullptr; int VM_Exit::set_vm_exited() { @@ -409,7 +409,7 @@ int VM_Exit::wait_for_threads_in_native_to_block() { if (thr->is_Compiler_thread()) { #if INCLUDE_JVMCI CompilerThread* ct = (CompilerThread*) thr; - if (ct->compiler() == NULL || !ct->compiler()->is_jvmci()) { + if (ct->compiler() == nullptr || !ct->compiler()->is_jvmci()) { num_active_compiler_thread++; } else { // A JVMCI compiler thread never accesses VM data structures @@ -479,7 +479,7 @@ void VM_Exit::doit() { // Check for exit hook exit_hook_t exit_hook = Arguments::exit_hook(); - if (exit_hook != NULL) { + if (exit_hook != nullptr) { // exit hook should exit. exit_hook(_exit_code); // ... 
but if it didn't, we must do it here diff --git a/src/hotspot/share/runtime/vmOperations.hpp b/src/hotspot/share/runtime/vmOperations.hpp index c4dddd457d806..152d72a0400cf 100644 --- a/src/hotspot/share/runtime/vmOperations.hpp +++ b/src/hotspot/share/runtime/vmOperations.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -181,8 +181,8 @@ class VM_FindDeadlocks: public VM_Operation { // which protects the JavaThreads in _deadlocks. public: - VM_FindDeadlocks(bool concurrent_locks) : _concurrent_locks(concurrent_locks), _deadlocks(NULL), _out(NULL), _setter() {}; - VM_FindDeadlocks(outputStream* st) : _concurrent_locks(true), _deadlocks(NULL), _out(st) {}; + VM_FindDeadlocks(bool concurrent_locks) : _concurrent_locks(concurrent_locks), _deadlocks(nullptr), _out(nullptr), _setter() {}; + VM_FindDeadlocks(outputStream* st) : _concurrent_locks(true), _deadlocks(nullptr), _out(st) {}; ~VM_FindDeadlocks(); DeadlockCycle* result() { return _deadlocks; }; diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp index 673cdb1b33733..fdb2eba7fe497 100644 --- a/src/hotspot/share/runtime/vmStructs.cpp +++ b/src/hotspot/share/runtime/vmStructs.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -3076,7 +3076,7 @@ void VMStructs::init() { static int recursiveFindType(VMTypeEntry* origtypes, const char* typeName, bool isRecurse) { { VMTypeEntry* types = origtypes; - while (types->typeName != NULL) { + while (types->typeName != nullptr) { if (strcmp(typeName, types->typeName) == 0) { // Found it return 1; @@ -3097,13 +3097,13 @@ static int recursiveFindType(VMTypeEntry* origtypes, const char* typeName, bool } FREE_C_HEAP_ARRAY(char, s); } - const char* start = NULL; + const char* start = nullptr; if (strstr(typeName, "GrowableArray<") == typeName) { start = typeName + strlen("GrowableArray<"); } else if (strstr(typeName, "Array<") == typeName) { start = typeName + strlen("Array<"); } - if (start != NULL) { + if (start != nullptr) { const char * end = strrchr(typeName, '>'); int len = end - start + 1; char * s = NEW_C_HEAP_ARRAY(char, len, mtInternal); diff --git a/src/hotspot/share/runtime/vmStructs.hpp b/src/hotspot/share/runtime/vmStructs.hpp index 7b0425b17c96f..02f09bd465b9c 100644 --- a/src/hotspot/share/runtime/vmStructs.hpp +++ b/src/hotspot/share/runtime/vmStructs.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -106,27 +106,27 @@ typedef struct { class VMStructs { public: // The last entry is identified over in the serviceability agent by - // the fact that it has a NULL fieldName + // the fact that it has a null fieldName static VMStructEntry localHotSpotVMStructs[]; // The function to get localHotSpotVMStructs length static size_t localHotSpotVMStructsLength() NOT_VM_STRUCTS_RETURN_(0); // The last entry is identified over in the serviceability agent by - // the fact that it has a NULL typeName + // the fact that it has a null typeName static VMTypeEntry localHotSpotVMTypes[]; // The function to get localHotSpotVMTypes length static size_t localHotSpotVMTypesLength() NOT_VM_STRUCTS_RETURN_(0); // Table of integer constants required by the serviceability agent. // The last entry is identified over in the serviceability agent by - // the fact that it has a NULL typeName + // the fact that it has a null typeName static VMIntConstantEntry localHotSpotVMIntConstants[]; // The function to get localHotSpotVMIntConstants length static size_t localHotSpotVMIntConstantsLength() NOT_VM_STRUCTS_RETURN_(0); // Table of long constants required by the serviceability agent. // The last entry is identified over in the serviceability agent by - // the fact that it has a NULL typeName + // the fact that it has a null typeName static VMLongConstantEntry localHotSpotVMLongConstants[]; // The function to get localHotSpotVMIntConstants length static size_t localHotSpotVMLongConstantsLength() NOT_VM_STRUCTS_RETURN_(0); @@ -157,7 +157,7 @@ class VMStructs { // This macro generates a VMStructEntry line for a nonstatic field #define GENERATE_NONSTATIC_VM_STRUCT_ENTRY(typeName, fieldName, type) \ - { QUOTE(typeName), QUOTE(fieldName), QUOTE(type), 0, offset_of(typeName, fieldName), NULL }, + { QUOTE(typeName), QUOTE(fieldName), QUOTE(type), 0, offset_of(typeName, fieldName), nullptr }, // This macro generates a VMStructEntry line for a static field #define GENERATE_STATIC_VM_STRUCT_ENTRY(typeName, fieldName, type) \ @@ -170,19 +170,19 @@ class VMStructs { // This macro generates a VMStructEntry line for an unchecked // nonstatic field, in which the size of the type is also specified. -// The type string is given as NULL, indicating an "opaque" type. +// The type string is given as null, indicating an "opaque" type. #define GENERATE_UNCHECKED_NONSTATIC_VM_STRUCT_ENTRY(typeName, fieldName, size) \ - { QUOTE(typeName), QUOTE(fieldName), NULL, 0, offset_of(typeName, fieldName), NULL }, + { QUOTE(typeName), QUOTE(fieldName), nullptr, 0, offset_of(typeName, fieldName), nullptr }, // This macro generates a VMStructEntry line for an unchecked // static field, in which the size of the type is also specified. -// The type string is given as NULL, indicating an "opaque" type. +// The type string is given as null, indicating an "opaque" type. 
#define GENERATE_UNCHECKED_STATIC_VM_STRUCT_ENTRY(typeName, fieldName, size) \ - { QUOTE(typeName), QUOTE(fieldName), NULL, 1, 0, (void*) &typeName::fieldName }, + { QUOTE(typeName), QUOTE(fieldName), nullptr, 1, 0, (void*) &typeName::fieldName }, // This macro generates the sentinel value indicating the end of the list #define GENERATE_VM_STRUCT_LAST_ENTRY() \ - { NULL, NULL, NULL, 0, 0, NULL } + { nullptr, nullptr, nullptr, 0, 0, nullptr } #ifdef ASSERT @@ -233,22 +233,22 @@ class VMStructs { { QUOTE(type), QUOTE(superclass), 0, 0, 0, sizeof(type) }, #define GENERATE_TOPLEVEL_VM_TYPE_ENTRY(type) \ - { QUOTE(type), NULL, 0, 0, 0, sizeof(type) }, + { QUOTE(type), nullptr, 0, 0, 0, sizeof(type) }, #define GENERATE_OOP_VM_TYPE_ENTRY(type) \ - { QUOTE(type), NULL, 1, 0, 0, sizeof(type) }, + { QUOTE(type), nullptr, 1, 0, 0, sizeof(type) }, #define GENERATE_INTEGER_VM_TYPE_ENTRY(type) \ - { QUOTE(type), NULL, 0, 1, 0, sizeof(type) }, + { QUOTE(type), nullptr, 0, 1, 0, sizeof(type) }, #define GENERATE_UNSIGNED_INTEGER_VM_TYPE_ENTRY(type) \ - { QUOTE(type), NULL, 0, 1, 1, sizeof(type) }, + { QUOTE(type), nullptr, 0, 1, 1, sizeof(type) }, #define GENERATE_VM_TYPE_LAST_ENTRY() \ - { NULL, NULL, 0, 0, 0, 0 } + { nullptr, nullptr, 0, 0, 0, 0 } #define CHECK_VM_TYPE_ENTRY(type, superclass) \ - { type* dummyObj = NULL; superclass* dummySuperObj = dummyObj; } + { type* dummyObj = nullptr; superclass* dummySuperObj = dummyObj; } #define CHECK_VM_TYPE_NO_OP(a) #define CHECK_SINGLE_ARG_VM_TYPE_NO_OP(a) @@ -269,7 +269,7 @@ class VMStructs { // This macro generates the sentinel value indicating the end of the list #define GENERATE_VM_INT_CONSTANT_LAST_ENTRY() \ - { NULL, 0 } + { nullptr, 0 } //-------------------------------------------------------------------------------- @@ -284,7 +284,7 @@ class VMStructs { // This macro generates the sentinel value indicating the end of the list #define GENERATE_VM_LONG_CONSTANT_LAST_ENTRY() \ - { NULL, 0 } + { nullptr, 0 } //-------------------------------------------------------------------------------- @@ -302,6 +302,6 @@ class VMStructs { // This macro generates the sentinel value indicating the end of the list #define GENERATE_VM_ADDRESS_LAST_ENTRY() \ - { NULL, NULL } + { nullptr, nullptr } #endif // SHARE_RUNTIME_VMSTRUCTS_HPP diff --git a/src/hotspot/share/runtime/vmThread.cpp b/src/hotspot/share/runtime/vmThread.cpp index c1f226702cd3e..2ba66dfd84e18 100644 --- a/src/hotspot/share/runtime/vmThread.cpp +++ b/src/hotspot/share/runtime/vmThread.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -101,16 +101,16 @@ static VM_Cleanup cleanup_op; bool VMThread::_should_terminate = false; bool VMThread::_terminated = false; -Monitor* VMThread::_terminate_lock = NULL; -VMThread* VMThread::_vm_thread = NULL; -VM_Operation* VMThread::_cur_vm_operation = NULL; +Monitor* VMThread::_terminate_lock = nullptr; +VMThread* VMThread::_vm_thread = nullptr; +VM_Operation* VMThread::_cur_vm_operation = nullptr; VM_Operation* VMThread::_next_vm_operation = &cleanup_op; // Prevent any thread from setting an operation until VM thread is ready. 
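[Reviewer note] The vmStructs.hpp hunks above are comment-only, but they spell out the convention the serviceability agent relies on: each exported table ends in a sentinel entry whose typeName (or fieldName) is null, emitted by the GENERATE_*_LAST_ENTRY macros, and consumers scan until they hit it, as recursiveFindType in vmStructs.cpp does. A minimal stand-alone sketch of that pattern; EntryLike, contains_type, and the table contents are hypothetical illustrations, not HotSpot code:

#include <string.h>

struct EntryLike {             // reduced stand-in for a VMStructs table entry
  const char* typeName;        // nullptr marks the sentinel (last) entry
  int         size;
};

static const EntryLike table[] = {
  { "Thread",   64 },          // sizes are made up for the example
  { "VMThread", 72 },
  { nullptr,     0 }           // what GENERATE_VM_TYPE_LAST_ENTRY produces, in spirit
};

static bool contains_type(const char* name) {
  for (const EntryLike* e = table; e->typeName != nullptr; e++) {  // stop at the sentinel
    if (strcmp(e->typeName, name) == 0) {
      return true;             // found before reaching the sentinel
    }
  }
  return false;
}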
-PerfCounter* VMThread::_perf_accumulated_vm_operation_time = NULL; -VMOperationTimeoutTask* VMThread::_timeout_task = NULL; +PerfCounter* VMThread::_perf_accumulated_vm_operation_time = nullptr; +VMOperationTimeoutTask* VMThread::_timeout_task = nullptr; void VMThread::create() { - assert(vm_thread() == NULL, "we can only allocate one VMThread"); + assert(vm_thread() == nullptr, "we can only allocate one VMThread"); _vm_thread = new VMThread(); if (AbortVMOnVMOperationTimeout) { @@ -125,7 +125,7 @@ void VMThread::create() { _timeout_task = new VMOperationTimeoutTask(interval); _timeout_task->enroll(); } else { - assert(_timeout_task == NULL, "sanity"); + assert(_timeout_task == nullptr, "sanity"); } _terminate_lock = new Monitor(Mutex::nosafepoint, "VMThreadTerminate_lock"); @@ -144,7 +144,7 @@ VMThread::VMThread() : NamedThread(), _is_running(false) { } void VMThread::destroy() { - _vm_thread = NULL; // VM thread is gone + _vm_thread = nullptr; // VM thread is gone } static VM_Halt halt_op; @@ -177,7 +177,7 @@ void VMThread::run() { // Note the intention to exit before safepointing. // 6295565 This has the effect of waiting for any large tty // outputs to finish. - if (xtty != NULL) { + if (xtty != nullptr) { ttyLocker ttyl; xtty->begin_elem("destroy_vm"); xtty->stamp(); @@ -212,7 +212,7 @@ void VMThread::run() { // signal other threads that VM process is gone { // Note: we must have the _no_safepoint_check_flag. Mutex::lock() allows - // VM thread to enter any lock at Safepoint as long as its _owner is NULL. + // VM thread to enter any lock at Safepoint as long as its _owner is null. // If that happens after _terminate_lock->wait() has unset _owner // but before it actually drops the lock and waits, the notification below // may get lost and we will have a hang. To avoid this, we need to use @@ -257,8 +257,8 @@ void VMThread::wait_for_vm_thread_exit() { } static void post_vm_operation_event(EventExecuteVMOperation* event, VM_Operation* op) { - assert(event != NULL, "invariant"); - assert(op != NULL, "invariant"); + assert(event != nullptr, "invariant"); + assert(op != nullptr, "invariant"); const bool evaluate_at_safepoint = op->evaluate_at_safepoint(); event->set_operation(op->type()); event->set_safepoint(evaluate_at_safepoint); @@ -301,8 +301,8 @@ class HandshakeALotClosure : public HandshakeClosure { }; bool VMThread::handshake_alot() { - assert(_cur_vm_operation == NULL, "should not have an op yet"); - assert(_next_vm_operation == NULL, "should not have an op yet"); + assert(_cur_vm_operation == nullptr, "should not have an op yet"); + assert(_next_vm_operation == nullptr, "should not have an op yet"); if (!HandshakeALot) { return false; } @@ -320,8 +320,8 @@ bool VMThread::handshake_alot() { } void VMThread::setup_periodic_safepoint_if_needed() { - assert(_cur_vm_operation == NULL, "Already have an op"); - assert(_next_vm_operation == NULL, "Already have an op"); + assert(_cur_vm_operation == nullptr, "Already have an op"); + assert(_next_vm_operation == nullptr, "Already have an op"); // Check for a cleanup before SafepointALot to keep stats correct. 
jlong interval_ms = SafepointTracing::time_since_last_safepoint_ms(); bool max_time_exceeded = GuaranteedSafepointInterval != 0 && @@ -337,7 +337,7 @@ void VMThread::setup_periodic_safepoint_if_needed() { } bool VMThread::set_next_operation(VM_Operation *op) { - if (_next_vm_operation != NULL) { + if (_next_vm_operation != nullptr) { return false; } log_debug(vmthread)("Adding VM operation: %s", op->name()); @@ -391,8 +391,8 @@ static void self_destruct_if_needed() { void VMThread::inner_execute(VM_Operation* op) { assert(Thread::current()->is_VM_thread(), "Must be the VM thread"); - VM_Operation* prev_vm_operation = NULL; - if (_cur_vm_operation != NULL) { + VM_Operation* prev_vm_operation = nullptr; + if (_cur_vm_operation != nullptr) { // Check that the VM operation allows nested VM operation. // This is normally not the case, e.g., the compiler // does not allow nested scavenges or compiles. @@ -407,10 +407,10 @@ void VMThread::inner_execute(VM_Operation* op) { _cur_vm_operation = op; HandleMark hm(VMThread::vm_thread()); - EventMarkVMOperation em("Executing %sVM operation: %s", prev_vm_operation != NULL ? "nested " : "", op->name()); + EventMarkVMOperation em("Executing %sVM operation: %s", prev_vm_operation != nullptr ? "nested " : "", op->name()); log_debug(vmthread)("Evaluating %s %s VM operation: %s", - prev_vm_operation != NULL ? "nested" : "", + prev_vm_operation != nullptr ? "nested" : "", _cur_vm_operation->evaluate_at_safepoint() ? "safepoint" : "non-safepoint", _cur_vm_operation->name()); @@ -443,13 +443,13 @@ void VMThread::wait_for_operation() { // Clear previous operation. // On first call this clears a dummy place-holder. - _next_vm_operation = NULL; + _next_vm_operation = nullptr; // Notify operation is done and notify a next operation can be installed. ml_op_lock.notify_all(); while (!should_terminate()) { self_destruct_if_needed(); - if (_next_vm_operation != NULL) { + if (_next_vm_operation != nullptr) { return; } if (handshake_alot()) { @@ -459,15 +459,15 @@ void VMThread::wait_for_operation() { Handshake::execute(&hal_cl); } // When we unlocked above someone might have setup a new op. 
- if (_next_vm_operation != NULL) { + if (_next_vm_operation != nullptr) { return; } } - assert(_next_vm_operation == NULL, "Must be"); - assert(_cur_vm_operation == NULL, "Must be"); + assert(_next_vm_operation == nullptr, "Must be"); + assert(_cur_vm_operation == nullptr, "Must be"); setup_periodic_safepoint_if_needed(); - if (_next_vm_operation != NULL) { + if (_next_vm_operation != nullptr) { return; } @@ -478,7 +478,7 @@ void VMThread::wait_for_operation() { } void VMThread::loop() { - assert(_cur_vm_operation == NULL, "no current one should be executing"); + assert(_cur_vm_operation == nullptr, "no current one should be executing"); SafepointSynchronize::init(_vm_thread); @@ -491,7 +491,7 @@ void VMThread::loop() { if (should_terminate()) break; wait_for_operation(); if (should_terminate()) break; - assert(_next_vm_operation != NULL, "Must have one"); + assert(_next_vm_operation != nullptr, "Must have one"); inner_execute(_next_vm_operation); } } @@ -551,5 +551,5 @@ void VMThread::execute(VM_Operation* op) { } void VMThread::verify() { - oops_do(&VerifyOopClosure::verify_oop, NULL); + oops_do(&VerifyOopClosure::verify_oop, nullptr); } diff --git a/src/hotspot/share/runtime/vmThread.hpp b/src/hotspot/share/runtime/vmThread.hpp index 34d95167f2852..6c87544acab9e 100644 --- a/src/hotspot/share/runtime/vmThread.hpp +++ b/src/hotspot/share/runtime/vmThread.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -110,7 +110,7 @@ class VMThread: public NamedThread { static VM_Operation::VMOp_Type vm_op_type() { VM_Operation* op = vm_operation(); - assert(op != NULL, "sanity"); + assert(op != nullptr, "sanity"); return op->type(); }
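[Reviewer note] The vmThread.cpp hunks above all lean on one convention that the conversion makes explicit: _next_vm_operation == nullptr means the single hand-off slot is empty, set_next_operation refuses to overwrite a pending operation, and wait_for_operation clears the slot before blocking for the next one. A much-simplified model of that slot in portable C++; OpSlot and its const char* payload are illustrative stand-ins for the VMThread/VM_Operation machinery, not its API:

#include <mutex>
#include <condition_variable>

class OpSlot {
  std::mutex              _lock;
  std::condition_variable _cv;
  const char*             _op = nullptr;  // nullptr == "no operation pending"

 public:
  // Loosely mirrors VMThread::set_next_operation: fails if the slot is taken.
  bool try_set(const char* op) {
    std::lock_guard<std::mutex> guard(_lock);
    if (_op != nullptr) {
      return false;                       // an operation is already queued
    }
    _op = op;
    _cv.notify_all();
    return true;
  }

  // Loosely mirrors VMThread::wait_for_operation: blocks until an operation
  // arrives, then empties the slot so the next producer can install one.
  const char* take() {
    std::unique_lock<std::mutex> guard(_lock);
    _cv.wait(guard, [this] { return _op != nullptr; });
    const char* op = _op;
    _op = nullptr;                        // clearing reopens the slot
    return op;
  }
};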
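[Reviewer note] Across all of these files the conversion is deliberately mechanical: NULL becomes nullptr wherever a typed pointer is compared, assigned, or passed, while integer word slots such as *addr = NULL_WORD in vframeArray.cpp stay untouched, since those fill raw stack words rather than pointers. A small stand-alone illustration of why nullptr is the safer spelling in pointer contexts; the probe overloads are hypothetical, for demonstration only:

#include <cstdio>

static void probe(int)   { std::puts("int overload"); }
static void probe(char*) { std::puts("pointer overload"); }

int main() {
  // probe(NULL);  // NULL is an integer-flavored constant: depending on how
  //               // the platform defines it, this silently picks probe(int)
  //               // or fails to compile as ambiguous.
  probe(nullptr);  // std::nullptr_t converts only to pointer types, so this
                   // unambiguously calls probe(char*).
  return 0;
}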