diff --git a/src/hotspot/share/memory/arena.cpp b/src/hotspot/share/memory/arena.cpp
index e6ee8de38a7..313c79dbd49 100644
--- a/src/hotspot/share/memory/arena.cpp
+++ b/src/hotspot/share/memory/arena.cpp
@@ -419,10 +419,10 @@ void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFail
 
 // Determine if pointer belongs to this Arena or not.
 bool Arena::contains( const void *ptr ) const {
+  if (_chunk == NULL) return false;
 #ifdef ASSERT
   if (UseMallocOnly) {
     // really slow, but not easy to make fast
-    if (_chunk == NULL) return false;
     char** bottom = (char**)_chunk->bottom();
     for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
       if (*p == ptr) return true;
diff --git a/src/hotspot/share/opto/bytecodeInfo.cpp b/src/hotspot/share/opto/bytecodeInfo.cpp
index 25ebd432ff6..1e252a87c01 100644
--- a/src/hotspot/share/opto/bytecodeInfo.cpp
+++ b/src/hotspot/share/opto/bytecodeInfo.cpp
@@ -43,7 +43,7 @@ InlineTree::InlineTree(Compile* c,
                        JVMState* caller_jvms, int caller_bci,
                        int max_inline_level) :
   C(c),
-  _caller_jvms(caller_jvms),
+  _caller_jvms(NULL),
   _method(callee),
   _caller_tree((InlineTree*) caller_tree),
   _count_inline_bcs(method()->code_size_for_inlining()),
@@ -55,13 +55,13 @@ InlineTree::InlineTree(Compile* c,
   _count_inlines = 0;
   _forced_inline = false;
 #endif
-  if (_caller_jvms != NULL) {
+  if (caller_jvms != NULL) {
     // Keep a private copy of the caller_jvms:
     _caller_jvms = new (C) JVMState(caller_jvms->method(), caller_tree->caller_jvms());
     _caller_jvms->set_bci(caller_jvms->bci());
     assert(!caller_jvms->should_reexecute(), "there should be no reexecute bytecode with inlining");
+    assert(_caller_jvms->same_calls_as(caller_jvms), "consistent JVMS");
   }
-  assert(_caller_jvms->same_calls_as(caller_jvms), "consistent JVMS");
   assert((caller_tree == NULL ? 0 : caller_tree->stack_depth() + 1) == stack_depth(), "correct (redundant) depth parameter");
   assert(caller_bci == this->caller_bci(), "correct (redundant) bci parameter");
   // Update hierarchical counts, count_inline_bcs() and count_inlines()
diff --git a/src/hotspot/share/opto/chaitin.hpp b/src/hotspot/share/opto/chaitin.hpp
index 842d5c652d4..646ba100f20 100644
--- a/src/hotspot/share/opto/chaitin.hpp
+++ b/src/hotspot/share/opto/chaitin.hpp
@@ -730,8 +730,8 @@ class PhaseChaitin : public PhaseRegAlloc {
   int yank_if_dead_recurse(Node *old, Node *orig_old, Block *current_block,
                            Node_List *value, Node_List *regnd);
   int yank( Node *old, Block *current_block, Node_List *value, Node_List *regnd );
-  int elide_copy( Node *n, int k, Block *current_block, Node_List &value, Node_List &regnd, bool can_change_regs );
-  int use_prior_register( Node *copy, uint idx, Node *def, Block *current_block, Node_List &value, Node_List &regnd );
+  int elide_copy( Node *n, int k, Block *current_block, Node_List *value, Node_List *regnd, bool can_change_regs );
+  int use_prior_register( Node *copy, uint idx, Node *def, Block *current_block, Node_List *value, Node_List *regnd );
   bool may_be_copy_of_callee( Node *def ) const;
 
   // If nreg already contains the same constant as val then eliminate it
diff --git a/src/hotspot/share/opto/postaloc.cpp b/src/hotspot/share/opto/postaloc.cpp
index aa9ae37a78a..520505e46bb 100644
--- a/src/hotspot/share/opto/postaloc.cpp
+++ b/src/hotspot/share/opto/postaloc.cpp
@@ -30,7 +30,7 @@
 
 // See if this register (or pairs, or vector) already contains the value.
 static bool register_contains_value(Node* val, OptoReg::Name reg, int n_regs,
-                                    Node_List& value) {
+                                    const Node_List &value) {
   for (int i = 0; i < n_regs; i++) {
     OptoReg::Name nreg = OptoReg::add(reg,-i);
     if (value[nreg] != val)
@@ -77,7 +77,7 @@ bool PhaseChaitin::may_be_copy_of_callee( Node *def ) const {
 
 //------------------------------yank-----------------------------------
 // Helper function for yank_if_dead
-int PhaseChaitin::yank( Node *old, Block *current_block, Node_List *value, Node_List *regnd ) {
+int PhaseChaitin::yank(Node *old, Block *current_block, Node_List *value, Node_List *regnd) {
   int blk_adjust=0;
   Block *oldb = _cfg.get_block_for_node(old);
   oldb->find_remove(old);
@@ -87,9 +87,10 @@ int PhaseChaitin::yank( Node *old, Block *current_block, Node_List *value, Node_
   }
   _cfg.unmap_node_from_block(old);
   OptoReg::Name old_reg = lrgs(_lrg_map.live_range_id(old)).reg();
-  if( regnd && (*regnd)[old_reg]==old ) { // Instruction is currently available?
-    value->map(old_reg,NULL);       // Yank from value/regnd maps
-    regnd->map(old_reg,NULL);       // This register's value is now unknown
+  assert(value != NULL || regnd == NULL, "sanity");
+  if (value != NULL && regnd != NULL && regnd->at(old_reg) == old) { // Instruction is currently available?
+    value->map(old_reg, NULL);      // Yank from value/regnd maps
+    regnd->map(old_reg, NULL);      // This register's value is now unknown
   }
   return blk_adjust;
 }
@@ -161,7 +162,7 @@ int PhaseChaitin::yank_if_dead_recurse(Node *old, Node *orig_old, Block *current
 // Use the prior value instead of the current value, in an effort to make
 // the current value go dead.  Return block iterator adjustment, in case
 // we yank some instructions from this block.
-int PhaseChaitin::use_prior_register( Node *n, uint idx, Node *def, Block *current_block, Node_List &value, Node_List &regnd ) {
+int PhaseChaitin::use_prior_register( Node *n, uint idx, Node *def, Block *current_block, Node_List *value, Node_List *regnd ) {
   // No effect?
   if( def == n->in(idx) ) return 0;
   // Def is currently dead and can be removed?  Do not resurrect
@@ -207,7 +208,7 @@ int PhaseChaitin::use_prior_register( Node *n, uint idx, Node *def, Block *curre
   _post_alloc++;
 
   // Is old def now dead?  We successfully yanked a copy?
-  return yank_if_dead(old,current_block,&value,&regnd);
+  return yank_if_dead(old,current_block,value,regnd);
 }
 
 
@@ -229,7 +230,7 @@ Node *PhaseChaitin::skip_copies( Node *c ) {
 
 //------------------------------elide_copy-------------------------------------
 // Remove (bypass) copies along Node n, edge k.
-int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &value, Node_List &regnd, bool can_change_regs ) {
+int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List *value, Node_List *regnd, bool can_change_regs ) {
   int blk_adjust = 0;
   uint nk_idx = _lrg_map.live_range_id(n->in(k));
 
@@ -253,11 +254,14 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &v
 
   // Phis and 2-address instructions cannot change registers so easily - their
   // outputs must match their input.
-  if( !can_change_regs )
+  if (!can_change_regs) {
     return blk_adjust;          // Only check stupid copies!
-
+  }
   // Loop backedges won't have a value-mapping yet
-  if( &value == NULL ) return blk_adjust;
+  assert(regnd != NULL || value == NULL, "sanity");
+  if (value == NULL || regnd == NULL) {
+    return blk_adjust;
+  }
 
   // Skip through all copies to the _value_ being used.  Do not change from
   // int to pointer.  This attempts to jump through a chain of copies, where
@@ -273,10 +277,11 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &v
   // See if it happens to already be in the correct register!
   // (either Phi's direct register, or the common case of the name
   // never-clobbered original-def register)
-  if (register_contains_value(val, val_reg, n_regs, value)) {
-    blk_adjust += use_prior_register(n,k,regnd[val_reg],current_block,value,regnd);
-    if( n->in(k) == regnd[val_reg] ) // Success!  Quit trying
-      return blk_adjust;
+  if (register_contains_value(val, val_reg, n_regs, *value)) {
+    blk_adjust += use_prior_register(n,k,regnd->at(val_reg),current_block,value,regnd);
+    if (n->in(k) == regnd->at(val_reg)) {
+      return blk_adjust; // Success!  Quit trying
+    }
   }
 
   // See if we can skip the copy by changing registers.  Don't change from
@@ -304,7 +309,7 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &v
       if (ignore_self) continue;
     }
 
-    Node *vv = value[reg];
+    Node *vv = value->at(reg);
     // For scalable register, number of registers may be inconsistent between
     // "val_reg" and "reg". For example, when "val" resides in register
     // but "reg" is located in stack.
@@ -326,7 +331,7 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &v
         last = (n_regs-1); // Looking for the last part of a set
       }
       if ((reg&last) != last) continue; // Wrong part of a set
-      if (!register_contains_value(vv, reg, n_regs, value)) continue; // Different value
+      if (!register_contains_value(vv, reg, n_regs, *value)) continue; // Different value
     }
     if( vv == val ||            // Got a direct hit?
         (t && vv && vv->bottom_type() == t && vv->is_Mach() &&
@@ -334,9 +339,9 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &v
       assert( !n->is_Phi(), "cannot change registers at a Phi so easily" );
       if( OptoReg::is_stack(nk_reg) || // CISC-loading from stack OR
           OptoReg::is_reg(reg) ||      // turning into a register use OR
-          regnd[reg]->outcnt()==1 ) {  // last use of a spill-load turns into a CISC use
-        blk_adjust += use_prior_register(n,k,regnd[reg],current_block,value,regnd);
-        if( n->in(k) == regnd[reg] ) // Success!  Quit trying
+          regnd->at(reg)->outcnt()==1 ) {  // last use of a spill-load turns into a CISC use
+        blk_adjust += use_prior_register(n,k,regnd->at(reg),current_block,value,regnd);
+        if( n->in(k) == regnd->at(reg) ) // Success!  Quit trying
           return blk_adjust;
       } // End of if not degrading to a stack
     } // End of if found value in another register
@@ -536,7 +541,7 @@ void PhaseChaitin::post_allocate_copy_removal() {
       Block* pb = _cfg.get_block_for_node(block->pred(j));
       // Remove copies along phi edges
      for (uint k = 1; k < phi_dex; k++) {
-        elide_copy(block->get_node(k), j, block, *blk2value[pb->_pre_order], *blk2regnd[pb->_pre_order], false);
+        elide_copy(block->get_node(k), j, block, blk2value[pb->_pre_order], blk2regnd[pb->_pre_order], false);
       }
       if (blk2value[pb->_pre_order]) { // Have a mapping on this edge?
         // See if this predecessor's mappings have been used by everybody
@@ -692,7 +697,7 @@ void PhaseChaitin::post_allocate_copy_removal() {
 
       // Remove copies along input edges
       for (k = 1; k < n->req(); k++) {
-        j -= elide_copy(n, k, block, value, regnd, two_adr != k);
+        j -= elide_copy(n, k, block, &value, &regnd, two_adr != k);
       }
 
       // Unallocated Nodes define no registers
diff --git a/src/hotspot/share/runtime/vmStructs.hpp b/src/hotspot/share/runtime/vmStructs.hpp
index 5e9db68bd70..7b0425b17c9 100644
--- a/src/hotspot/share/runtime/vmStructs.hpp
+++ b/src/hotspot/share/runtime/vmStructs.hpp
@@ -188,13 +188,18 @@ class VMStructs {
 #ifdef ASSERT
 
 // This macro checks the type of a VMStructEntry by comparing pointer types
-#define CHECK_NONSTATIC_VM_STRUCT_ENTRY(typeName, fieldName, type)              \
- {typeName *dummyObj = NULL; type* dummy = &dummyObj->fieldName;                \
-  assert(offset_of(typeName, fieldName) < sizeof(typeName), "Illegal nonstatic struct entry, field offset too large"); }
+#define CHECK_NONSTATIC_VM_STRUCT_ENTRY(typeName, fieldName, type) {            \
+    static_assert(                                                              \
+      std::is_convertible<                                                      \
+        std::add_pointer_t<decltype(declval<typeName>().fieldName)>,            \
+        std::add_pointer_t<type>>::value,                                       \
+      "type mismatch for " XSTR(fieldName) " member of " XSTR(typeName));       \
+    assert(offset_of(typeName, fieldName) < sizeof(typeName), "...");           \
+}
 
 // This macro checks the type of a volatile VMStructEntry by comparing pointer types
-#define CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY(typeName, fieldName, type)     \
- {typedef type dummyvtype; typeName *dummyObj = NULL; volatile dummyvtype* dummy = &dummyObj->fieldName; }
+#define CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY(typeName, fieldName, type)     \
+  CHECK_NONSTATIC_VM_STRUCT_ENTRY(typeName, fieldName, std::add_volatile_t<type>)
 
 // This macro checks the type of a static VMStructEntry by comparing pointer types
 #define CHECK_STATIC_VM_STRUCT_ENTRY(typeName, fieldName, type)                 \
diff --git a/src/hotspot/share/utilities/globalDefinitions.hpp b/src/hotspot/share/utilities/globalDefinitions.hpp
index 88ee0248205..9b70dc25570 100644
--- a/src/hotspot/share/utilities/globalDefinitions.hpp
+++ b/src/hotspot/share/utilities/globalDefinitions.hpp
@@ -35,6 +35,7 @@
 #include COMPILER_HEADER(utilities/globalDefinitions)
 
 #include <stddef.h>
+#include <type_traits>
 
 class oopDesc;
 
@@ -1211,4 +1212,8 @@ template<typename K> bool primitive_equals(const K& k0, const K& k1) {
 }
 
 
+// Converts any type T to a reference type.
+template <typename T>
+std::add_rvalue_reference_t<T> declval() noexcept;
+
 #endif // SHARE_UTILITIES_GLOBALDEFINITIONS_HPP
diff --git a/src/hotspot/share/utilities/globalDefinitions_gcc.hpp b/src/hotspot/share/utilities/globalDefinitions_gcc.hpp
index 30cca9ee7ae..af1270af3a9 100644
--- a/src/hotspot/share/utilities/globalDefinitions_gcc.hpp
+++ b/src/hotspot/share/utilities/globalDefinitions_gcc.hpp
@@ -144,10 +144,21 @@ inline int wcslen(const jchar* x) { return wcslen((const wchar_t*)x); }
 #endif // _LP64
 
 // gcc warns about applying offsetof() to non-POD object or calculating
-// offset directly when base address is NULL. Use 16 to get around the
-// warning. The -Wno-invalid-offsetof option could be used to suppress
-// this warning, but we instead just avoid the use of offsetof().
-#define offset_of(klass,field) (size_t)((intx)&(((klass*)16)->field) - 16)
+// offset directly when base address is NULL. The -Wno-invalid-offsetof
+// option could be used to suppress this warning, but we instead just
+// avoid the use of offsetof().
+//
+// FIXME: This macro is complex and rather arcane.  Perhaps we should
+// use offsetof() instead, with the invalid-offsetof warning
+// temporarily disabled.
+#define offset_of(klass,field) \
+[]() { \
+  char space[sizeof (klass)] ATTRIBUTE_ALIGNED(16); \
+  klass* dummyObj = (klass*)space; \
+  char* c = (char*)(void*)&dummyObj->field; \
+  return (size_t)(c - space); \
+}()
+
 #if defined(_LP64) && defined(__APPLE__)
 #define JLONG_FORMAT "%ld"
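
As an aside (not part of the patch above), the idea behind the new offset_of is easy to check in isolation: instead of deriving the field address from a made-up base pointer such as (klass*)16, it points a typed pointer at a real, suitably aligned stack buffer and measures the distance to the field. The sketch below is a minimal standalone illustration under that assumption; the Sample struct and the offset_of_sketch name are hypothetical, and plain alignas(16) stands in for HotSpot's ATTRIBUTE_ALIGNED(16).

    // Standalone sketch only -- not part of the patch above.
    // Same shape as the patched offset_of(): take the field's address inside a
    // real, 16-byte aligned stack buffer instead of a fake non-null base address.
    #include <stddef.h>
    #include <stdio.h>

    // Hypothetical stand-in for a HotSpot class with a few fields.
    struct Sample {
      long  header;
      int   flags;
      char* name;
    };

    // alignas(16) stands in for HotSpot's ATTRIBUTE_ALIGNED(16).
    #define offset_of_sketch(klass, field)          \
      []() {                                        \
        alignas(16) char space[sizeof(klass)];      \
        klass* dummyObj = (klass*)space;            \
        char* c = (char*)(void*)&dummyObj->field;   \
        return (size_t)(c - space);                 \
      }()

    int main() {
      // For a standard-layout type this should agree with plain offsetof().
      printf("flags: %zu vs %zu\n", offset_of_sketch(Sample, flags), (size_t)offsetof(Sample, flags));
      printf("name:  %zu vs %zu\n", offset_of_sketch(Sample, name),  (size_t)offsetof(Sample, name));
      return 0;
    }

On a typical gcc build both columns print the same offsets; the point of the buffer-based form is simply that it avoids the fake-base-address arithmetic that the old macro relied on and that newer compilers flag.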