diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp index 3f0646c5a0b59..a86cc40ab5055 100644 --- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp @@ -36,6 +36,7 @@ #include "oops/markWord.hpp" #include "oops/method.hpp" #include "oops/methodData.hpp" +#include "oops/resolvedFieldEntry.hpp" #include "oops/resolvedIndyEntry.hpp" #include "prims/jvmtiExport.hpp" #include "prims/jvmtiThreadState.hpp" @@ -1881,8 +1882,24 @@ void InterpreterMacroAssembler::load_resolved_indy_entry(Register cache, Registe get_cache_index_at_bcp(index, 1, sizeof(u4)); // Get address of invokedynamic array ldr(cache, Address(rcpool, in_bytes(ConstantPoolCache::invokedynamic_entries_offset()))); - // Scale the index to be the entry index * sizeof(ResolvedInvokeDynamicInfo) + // Scale the index to be the entry index * sizeof(ResolvedIndyEntry) lsl(index, index, log2i_exact(sizeof(ResolvedIndyEntry))); add(cache, cache, Array<ResolvedIndyEntry>::base_offset_in_bytes()); lea(cache, Address(cache, index)); } + +void InterpreterMacroAssembler::load_field_entry(Register cache, Register index, int bcp_offset) { + // Get index out of bytecode pointer + get_cache_index_at_bcp(index, bcp_offset, sizeof(u2)); + // Take shortcut if the size is a power of 2 + if (is_power_of_2(sizeof(ResolvedFieldEntry))) { + lsl(index, index, log2i_exact(sizeof(ResolvedFieldEntry))); // Scale index by power of 2 + } else { + mov(cache, sizeof(ResolvedFieldEntry)); + mul(index, index, cache); // Scale the index to be the entry index * sizeof(ResolvedFieldEntry) + } + // Get address of field entries array + ldr(cache, Address(rcpool, ConstantPoolCache::field_entries_offset())); + add(cache, cache, Array<ResolvedFieldEntry>::base_offset_in_bytes()); + lea(cache, Address(cache, index)); +} diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp index 019eb23558174..70822b6c424dc 100644 --- 
a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp @@ -321,6 +321,7 @@ class InterpreterMacroAssembler: public MacroAssembler { } void load_resolved_indy_entry(Register cache, Register index); + void load_field_entry(Register cache, Register index, int bcp_offset = 1); }; #endif // CPU_AARCH64_INTERP_MASM_AARCH64_HPP diff --git a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp index a0073bcc51273..5d57505ddc5c7 100644 --- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp @@ -38,6 +38,7 @@ #include "oops/method.hpp" #include "oops/objArrayKlass.hpp" #include "oops/oop.inline.hpp" +#include "oops/resolvedFieldEntry.hpp" #include "oops/resolvedIndyEntry.hpp" #include "prims/jvmtiExport.hpp" #include "prims/methodHandles.hpp" @@ -187,7 +188,14 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg, // additional, required work. 
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); assert(load_bc_into_bc_reg, "we use bc_reg as temp"); - __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1); + __ load_field_entry(temp_reg, bc_reg); + if (byte_no == f1_byte) { + __ lea(temp_reg, Address(temp_reg, in_bytes(ResolvedFieldEntry::get_code_offset()))); + } else { + __ lea(temp_reg, Address(temp_reg, in_bytes(ResolvedFieldEntry::put_code_offset()))); + } + // Load-acquire the bytecode to match store-release in ResolvedFieldEntry::fill_in() + __ ldarb(temp_reg, temp_reg); __ movw(bc_reg, bc); __ cbzw(temp_reg, L_patch_done); // don't patch } @@ -2247,11 +2255,6 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Label resolved, clinit_barrier_slow; Bytecodes::Code code = bytecode(); - switch (code) { - case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break; - case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break; - default: break; - } assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size); @@ -2279,6 +2282,69 @@ void TemplateTable::resolve_cache_and_index(int byte_no, } } +void TemplateTable::resolve_cache_and_index_for_field(int byte_no, + Register Rcache, + Register index) { + const Register temp = r19; + assert_different_registers(Rcache, index, temp); + + Label resolved; + + Bytecodes::Code code = bytecode(); + switch (code) { + case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break; + case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break; + default: break; + } + + assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); + __ load_field_entry(Rcache, index); + if (byte_no == f1_byte) { + __ lea(temp, Address(Rcache, in_bytes(ResolvedFieldEntry::get_code_offset()))); + } else { + __ lea(temp, Address(Rcache, in_bytes(ResolvedFieldEntry::put_code_offset()))); + } 
+ // Load-acquire the bytecode to match store-release in ResolvedFieldEntry::fill_in() + __ ldarb(temp, temp); + __ subs(zr, temp, (int) code); // have we resolved this bytecode? + __ br(Assembler::EQ, resolved); + + // resolve first time through + address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache); + __ mov(temp, (int) code); + __ call_VM(noreg, entry, temp); + + // Update registers with resolved info + __ load_field_entry(Rcache, index); + __ bind(resolved); +} + +void TemplateTable::load_resolved_field_entry(Register obj, + Register cache, + Register tos_state, + Register offset, + Register flags, + bool is_static = false) { + assert_different_registers(cache, tos_state, flags, offset); + + // Field offset + __ load_sized_value(offset, Address(cache, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/); + + // Flags + __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedFieldEntry::flags_offset()))); + + // TOS state + __ load_unsigned_byte(tos_state, Address(cache, in_bytes(ResolvedFieldEntry::type_offset()))); + + // Klass overwrite register + if (is_static) { + __ ldr(obj, Address(cache, ResolvedFieldEntry::field_holder_offset())); + const int mirror_offset = in_bytes(Klass::java_mirror_offset()); + __ ldr(obj, Address(obj, mirror_offset)); + __ resolve_oop_handle(obj, r5, rscratch2); + } +} + // The Rcache and index registers must be set before call // n.b unlike x86 cache already includes the index offset void TemplateTable::load_field_cp_cache_entry(Register obj, @@ -2430,8 +2496,7 @@ void TemplateTable::jvmti_post_field_access(Register cache, Register index, __ ldrw(r0, Address(rscratch1)); __ cbzw(r0, L1); - __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1); - __ lea(c_rarg2, Address(c_rarg2, in_bytes(ConstantPoolCache::base_offset()))); + __ load_field_entry(c_rarg2, index); if (is_static) { __ mov(c_rarg1, zr); // null object reference @@ -2441,11 +2506,10 @@ void 
TemplateTable::jvmti_post_field_access(Register cache, Register index, } // c_rarg1: object pointer or null // c_rarg2: cache entry pointer - // c_rarg3: jvalue object on the stack __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), - c_rarg1, c_rarg2, c_rarg3); - __ get_cache_and_index_at_bcp(cache, index, 1); + c_rarg1, c_rarg2); + __ load_field_entry(cache, index); __ bind(L1); } } @@ -2459,17 +2523,17 @@ void TemplateTable::pop_and_check_object(Register r) void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) { - const Register cache = r2; - const Register index = r3; - const Register obj = r4; - const Register off = r19; - const Register flags = r0; - const Register raw_flags = r6; - const Register bc = r4; // uses same reg as obj, so don't mix them + const Register cache = r4; + const Register obj = r4; + const Register index = r3; + const Register tos_state = r3; + const Register off = r19; + const Register flags = r6; + const Register bc = r4; // uses same reg as obj, so don't mix them - resolve_cache_and_index(byte_no, cache, index, sizeof(u2)); + resolve_cache_and_index_for_field(byte_no, cache, index); jvmti_post_field_access(cache, index, is_static, false); - load_field_cp_cache_entry(obj, cache, index, off, raw_flags, is_static); + load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static); if (!is_static) { // obj is on the stack @@ -2484,7 +2548,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr // the stores in one method and we interpret the loads in another. 
if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()){ Label notVolatile; - __ tbz(raw_flags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); + __ tbz(flags, ResolvedFieldEntry::is_volatile_shift, notVolatile); __ membar(MacroAssembler::AnyAny); __ bind(notVolatile); } @@ -2494,13 +2558,8 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr Label Done, notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble; - // x86 uses a shift and mask or wings it with a shift plus assert - // the mask is not needed. aarch64 just uses bitfield extract - __ ubfxw(flags, raw_flags, ConstantPoolCacheEntry::tos_state_shift, - ConstantPoolCacheEntry::tos_state_bits); - assert(btos == 0, "change code, btos != 0"); - __ cbnz(flags, notByte); + __ cbnz(tos_state, notByte); // Don't rewrite getstatic, only getfield if (is_static) rc = may_not_rewrite; @@ -2515,7 +2574,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ b(Done); __ bind(notByte); - __ cmp(flags, (u1)ztos); + __ cmp(tos_state, (u1)ztos); __ br(Assembler::NE, notBool); // ztos (same code as btos) @@ -2529,7 +2588,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ b(Done); __ bind(notBool); - __ cmp(flags, (u1)atos); + __ cmp(tos_state, (u1)atos); __ br(Assembler::NE, notObj); // atos do_oop_load(_masm, field, r0, IN_HEAP); @@ -2540,7 +2599,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ b(Done); __ bind(notObj); - __ cmp(flags, (u1)itos); + __ cmp(tos_state, (u1)itos); __ br(Assembler::NE, notInt); // itos __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg); @@ -2552,7 +2611,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ b(Done); __ bind(notInt); - __ cmp(flags, (u1)ctos); + __ cmp(tos_state, (u1)ctos); __ br(Assembler::NE, notChar); // ctos __ access_load_at(T_CHAR, IN_HEAP, r0, field, 
noreg, noreg); @@ -2564,7 +2623,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ b(Done); __ bind(notChar); - __ cmp(flags, (u1)stos); + __ cmp(tos_state, (u1)stos); __ br(Assembler::NE, notShort); // stos __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg); @@ -2576,7 +2635,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ b(Done); __ bind(notShort); - __ cmp(flags, (u1)ltos); + __ cmp(tos_state, (u1)ltos); __ br(Assembler::NE, notLong); // ltos __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg); @@ -2588,7 +2647,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ b(Done); __ bind(notLong); - __ cmp(flags, (u1)ftos); + __ cmp(tos_state, (u1)ftos); __ br(Assembler::NE, notFloat); // ftos __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg); @@ -2601,7 +2660,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ bind(notFloat); #ifdef ASSERT - __ cmp(flags, (u1)dtos); + __ cmp(tos_state, (u1)dtos); __ br(Assembler::NE, notDouble); #endif // dtos @@ -2621,7 +2680,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ bind(Done); Label notVolatile; - __ tbz(raw_flags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); + __ tbz(flags, ResolvedFieldEntry::is_volatile_shift, notVolatile); __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore); __ bind(notVolatile); } @@ -2646,8 +2705,6 @@ void TemplateTable::getstatic(int byte_no) void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) { transition(vtos, vtos); - ByteSize cp_base_offset = ConstantPoolCache::base_offset(); - if (JvmtiExport::can_post_field_modification()) { // Check to see if a field modification watch has been set before // we take the time to call into the VM. 
@@ -2657,7 +2714,7 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is __ ldrw(r0, Address(rscratch1)); __ cbz(r0, L1); - __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1); + __ mov(c_rarg2, cache); if (is_static) { // Life is simple. Null out the object pointer. @@ -2667,12 +2724,7 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is // the object. We don't know the size of the value, though; it // could be one or two words depending on its type. As a result, // we must find the type to determine where the object is. - __ ldrw(c_rarg3, Address(c_rarg2, - in_bytes(cp_base_offset + - ConstantPoolCacheEntry::flags_offset()))); - __ lsr(c_rarg3, c_rarg3, - ConstantPoolCacheEntry::tos_state_shift); - ConstantPoolCacheEntry::verify_tos_state_shift(); + __ load_unsigned_byte(c_rarg3, Address(c_rarg2, in_bytes(ResolvedFieldEntry::type_offset()))); Label nope2, done, ok; __ ldr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue __ cmpw(c_rarg3, ltos); @@ -2683,8 +2735,6 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is __ ldr(c_rarg1, at_tos_p2()); // ltos (two word jvalue) __ bind(nope2); } - // cache entry pointer - __ add(c_rarg2, c_rarg2, in_bytes(cp_base_offset)); // object (tos) __ mov(c_rarg3, esp); // c_rarg1: object pointer set up above (null if static) @@ -2694,7 +2744,7 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), c_rarg1, c_rarg2, c_rarg3); - __ get_cache_and_index_at_bcp(cache, index, 1); + __ load_field_entry(cache, index); __ bind(L1); } } @@ -2702,23 +2752,24 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) { transition(vtos, vtos); - const Register cache = r2; - const Register index = r3; - const Register obj = r2; - 
const Register off = r19; - const Register flags = r0; - const Register bc = r4; + const Register cache = r2; + const Register index = r3; + const Register tos_state = r3; + const Register obj = r2; + const Register off = r19; + const Register flags = r0; + const Register bc = r4; - resolve_cache_and_index(byte_no, cache, index, sizeof(u2)); + resolve_cache_and_index_for_field(byte_no, cache, index); jvmti_post_field_mod(cache, index, is_static); - load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); + load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static); Label Done; __ mov(r5, flags); { Label notVolatile; - __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); + __ tbz(r5, ResolvedFieldEntry::is_volatile_shift, notVolatile); __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore); __ bind(notVolatile); } @@ -2729,12 +2780,8 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr Label notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble; - // x86 uses a shift and mask or wings it with a shift plus assert - // the mask is not needed. 
aarch64 just uses bitfield extract - __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift, ConstantPoolCacheEntry::tos_state_bits); - assert(btos == 0, "change code, btos != 0"); - __ cbnz(flags, notByte); + __ cbnz(tos_state, notByte); // Don't rewrite putstatic, only putfield if (is_static) rc = may_not_rewrite; @@ -2751,7 +2798,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr } __ bind(notByte); - __ cmp(flags, (u1)ztos); + __ cmp(tos_state, (u1)ztos); __ br(Assembler::NE, notBool); // ztos @@ -2766,7 +2813,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr } __ bind(notBool); - __ cmp(flags, (u1)atos); + __ cmp(tos_state, (u1)atos); __ br(Assembler::NE, notObj); // atos @@ -2782,7 +2829,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr } __ bind(notObj); - __ cmp(flags, (u1)itos); + __ cmp(tos_state, (u1)itos); __ br(Assembler::NE, notInt); // itos @@ -2797,7 +2844,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr } __ bind(notInt); - __ cmp(flags, (u1)ctos); + __ cmp(tos_state, (u1)ctos); __ br(Assembler::NE, notChar); // ctos @@ -2812,7 +2859,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr } __ bind(notChar); - __ cmp(flags, (u1)stos); + __ cmp(tos_state, (u1)stos); __ br(Assembler::NE, notShort); // stos @@ -2827,7 +2874,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr } __ bind(notShort); - __ cmp(flags, (u1)ltos); + __ cmp(tos_state, (u1)ltos); __ br(Assembler::NE, notLong); // ltos @@ -2842,7 +2889,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr } __ bind(notLong); - __ cmp(flags, (u1)ftos); + __ cmp(tos_state, (u1)ftos); __ br(Assembler::NE, notFloat); // ftos @@ -2858,7 +2905,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr __ bind(notFloat); #ifdef 
ASSERT - __ cmp(flags, (u1)dtos); + __ cmp(tos_state, (u1)dtos); __ br(Assembler::NE, notDouble); #endif @@ -2883,7 +2930,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr { Label notVolatile; - __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); + __ tbz(r5, ResolvedFieldEntry::is_volatile_shift, notVolatile); __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore); __ bind(notVolatile); } @@ -2902,8 +2949,7 @@ void TemplateTable::putstatic(int byte_no) { putfield_or_static(byte_no, true); } -void TemplateTable::jvmti_post_fast_field_mod() -{ +void TemplateTable::jvmti_post_fast_field_mod() { if (JvmtiExport::can_post_field_modification()) { // Check to see if a field modification watch has been set before // we take the time to call into the VM. @@ -2933,7 +2979,7 @@ void TemplateTable::jvmti_post_fast_field_mod() } __ mov(c_rarg3, esp); // points to jvalue on the stack // access constant pool cache entry - __ get_cache_entry_pointer_at_bcp(c_rarg2, r0, 1); + __ load_field_entry(c_rarg2, r0); __ verify_oop(r19); // r19: object pointer copied above // c_rarg2: cache entry pointer @@ -2968,21 +3014,18 @@ void TemplateTable::fast_storefield(TosState state) jvmti_post_fast_field_mod(); // access constant pool cache - __ get_cache_and_index_at_bcp(r2, r1, 1); + __ load_field_entry(r2, r1); + __ push(r0); + // R1: field offset, R2: TOS, R3: flags + load_resolved_field_entry(r2, r2, r0, r1, r3); + __ pop(r0); // Must prevent reordering of the following cp cache loads with bytecode load __ membar(MacroAssembler::LoadLoad); - // test for volatile with r3 - __ ldrw(r3, Address(r2, in_bytes(base + - ConstantPoolCacheEntry::flags_offset()))); - - // replace index with field offset from cache entry - __ ldr(r1, Address(r2, in_bytes(base + ConstantPoolCacheEntry::f2_offset()))); - { Label notVolatile; - __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); + __ tbz(r3, 
ResolvedFieldEntry::is_volatile_shift, notVolatile); __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore); __ bind(notVolatile); } @@ -3030,7 +3073,7 @@ void TemplateTable::fast_storefield(TosState state) { Label notVolatile; - __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); + __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile); __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore); __ bind(notVolatile); } @@ -3049,7 +3092,7 @@ void TemplateTable::fast_accessfield(TosState state) __ ldrw(r2, Address(rscratch1)); __ cbzw(r2, L1); // access constant pool cache entry - __ get_cache_entry_pointer_at_bcp(c_rarg2, rscratch2, 1); + __ load_field_entry(c_rarg2, rscratch2); __ verify_oop(r0); __ push_ptr(r0); // save object pointer before call_VM() clobbers it __ mov(c_rarg1, r0); @@ -3064,15 +3107,13 @@ void TemplateTable::fast_accessfield(TosState state) } // access constant pool cache - __ get_cache_and_index_at_bcp(r2, r1, 1); + __ load_field_entry(r2, r1); // Must prevent reordering of the following cp cache loads with bytecode load __ membar(MacroAssembler::LoadLoad); - __ ldr(r1, Address(r2, in_bytes(ConstantPoolCache::base_offset() + - ConstantPoolCacheEntry::f2_offset()))); - __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() + - ConstantPoolCacheEntry::flags_offset()))); + __ load_sized_value(r1, Address(r2, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/); + __ load_unsigned_byte(r3, Address(r2, in_bytes(ResolvedFieldEntry::flags_offset()))); // r0: object __ verify_oop(r0); @@ -3087,7 +3128,7 @@ void TemplateTable::fast_accessfield(TosState state) // the stores in one method and we interpret the loads in another. 
if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) { Label notVolatile; - __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); + __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile); __ membar(MacroAssembler::AnyAny); __ bind(notVolatile); } @@ -3124,7 +3165,7 @@ void TemplateTable::fast_accessfield(TosState state) } { Label notVolatile; - __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); + __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile); __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore); __ bind(notVolatile); } @@ -3137,9 +3178,8 @@ void TemplateTable::fast_xaccess(TosState state) // get receiver __ ldr(r0, aaddress(0)); // access constant pool cache - __ get_cache_and_index_at_bcp(r2, r3, 2); - __ ldr(r1, Address(r2, in_bytes(ConstantPoolCache::base_offset() + - ConstantPoolCacheEntry::f2_offset()))); + __ load_field_entry(r2, r3, 2); + __ load_sized_value(r1, Address(r2, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/); // 8179954: We need to make sure that the code generated for // volatile accesses forms a sequentially-consistent set of @@ -3149,9 +3189,8 @@ void TemplateTable::fast_xaccess(TosState state) // the stores in one method and we interpret the loads in another. 
if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) { Label notVolatile; - __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() + - ConstantPoolCacheEntry::flags_offset()))); - __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); + __ load_unsigned_byte(r3, Address(r2, in_bytes(ResolvedFieldEntry::flags_offset()))); + __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile); __ membar(MacroAssembler::AnyAny); __ bind(notVolatile); } @@ -3177,9 +3216,8 @@ void TemplateTable::fast_xaccess(TosState state) { Label notVolatile; - __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() + - ConstantPoolCacheEntry::flags_offset()))); - __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile); + __ load_unsigned_byte(r3, Address(r2, in_bytes(ResolvedFieldEntry::flags_offset()))); + __ tbz(r3, ResolvedFieldEntry::is_volatile_shift, notVolatile); __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore); __ bind(notVolatile); } diff --git a/src/hotspot/cpu/ppc/interp_masm_ppc.hpp b/src/hotspot/cpu/ppc/interp_masm_ppc.hpp index 928c48cdd174c..c868816b278e5 100644 --- a/src/hotspot/cpu/ppc/interp_masm_ppc.hpp +++ b/src/hotspot/cpu/ppc/interp_masm_ppc.hpp @@ -128,6 +128,7 @@ class InterpreterMacroAssembler: public MacroAssembler { void get_cache_and_index_at_bcp(Register cache, int bcp_offset, size_t index_size = sizeof(u2)); void load_resolved_indy_entry(Register cache, Register index); + void load_field_entry(Register cache, Register index, int bcp_offset = 1); void get_u4(Register Rdst, Register Rsrc, int offset, signedOrNot is_signed); diff --git a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp index 923fbf3ee93a8..2873fc2eca03c 100644 --- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp +++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp @@ -31,6 +31,7 @@ #include "interp_masm_ppc.hpp" #include "interpreter/interpreterRuntime.hpp" #include "oops/methodData.hpp" +#include 
"oops/resolvedFieldEntry.hpp" #include "oops/resolvedIndyEntry.hpp" #include "prims/jvmtiExport.hpp" #include "prims/jvmtiThreadState.hpp" @@ -487,11 +488,28 @@ void InterpreterMacroAssembler::load_resolved_indy_entry(Register cache, Registe // Get address of invokedynamic array ld_ptr(cache, in_bytes(ConstantPoolCache::invokedynamic_entries_offset()), R27_constPoolCache); - // Scale the index to be the entry index * sizeof(ResolvedInvokeDynamicInfo) + // Scale the index to be the entry index * sizeof(ResolvedIndyEntry) sldi(index, index, log2i_exact(sizeof(ResolvedIndyEntry))); add(cache, cache, index); } +void InterpreterMacroAssembler::load_field_entry(Register cache, Register index, int bcp_offset) { + // Get index out of bytecode pointer + get_cache_index_at_bcp(index, bcp_offset, sizeof(u2)); + // Take shortcut if the size is a power of 2 + if (is_power_of_2(sizeof(ResolvedFieldEntry))) { + // Scale index by power of 2 + sldi(index, index, log2i_exact(sizeof(ResolvedFieldEntry))); + } else { + // Scale the index to be the entry index * sizeof(ResolvedFieldEntry) + mulli(index, index, sizeof(ResolvedFieldEntry)); + } + // Get address of field entries array + ld_ptr(cache, in_bytes(ConstantPoolCache::field_entries_offset()), R27_constPoolCache); + addi(cache, cache, Array<ResolvedFieldEntry>::base_offset_in_bytes()); + add(cache, cache, index); +} + // Load object from cpool->resolved_references(index). 
// Kills: // - index diff --git a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp index 05de377984b9b..0995edc86153e 100644 --- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp +++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp @@ -38,6 +38,7 @@ #include "oops/methodData.hpp" #include "oops/objArrayKlass.hpp" #include "oops/oop.inline.hpp" +#include "oops/resolvedFieldEntry.hpp" #include "oops/resolvedIndyEntry.hpp" #include "prims/jvmtiExport.hpp" #include "prims/methodHandles.hpp" @@ -116,13 +117,10 @@ void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Reg // additional, required work. assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); assert(load_bc_into_bc_reg, "we use bc_reg as temp"); - __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1); - // ((*(cache+indices))>>((1+byte_no)*8))&0xFF: -#if defined(VM_LITTLE_ENDIAN) - __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp); -#else - __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp); -#endif + __ load_field_entry(Rtemp, Rnew_bc); + int code_offset = (byte_no == f1_byte) ? 
in_bytes(ResolvedFieldEntry::get_code_offset()) + : in_bytes(ResolvedFieldEntry::put_code_offset()); + __ lbz(Rnew_bc, code_offset, Rtemp); __ cmpwi(CCR0, Rnew_bc, 0); __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc); __ beq(CCR0, L_patch_done); @@ -2247,6 +2245,68 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist __ bind(Ldone); } +void TemplateTable::resolve_cache_and_index_for_field(int byte_no, + Register Rcache, + Register index) { + assert_different_registers(Rcache, index); + + Label resolved; + + Bytecodes::Code code = bytecode(); + switch (code) { + case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break; + case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break; + default: break; + } + + assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); + __ load_field_entry(Rcache, index); + int code_offset = (byte_no == f1_byte) ? in_bytes(ResolvedFieldEntry::get_code_offset()) + : in_bytes(ResolvedFieldEntry::put_code_offset()); + __ lbz(R0, code_offset, Rcache); + __ cmpwi(CCR0, R0, (int)code); // have we resolved this bytecode? + __ beq(CCR0, resolved); + + // resolve first time through + address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache); + __ li(R4_ARG2, (int)code); + __ call_VM(noreg, entry, R4_ARG2); + + // Update registers with resolved info + __ load_field_entry(Rcache, index); + __ bind(resolved); + + // Use acquire semantics for the bytecode (see ResolvedFieldEntry::fill_in()). + __ isync(); // Order load wrt. succeeding loads. 
+} + +void TemplateTable::load_resolved_field_entry(Register obj, + Register cache, + Register tos_state, + Register offset, + Register flags, + bool is_static = false) { + assert_different_registers(cache, tos_state, flags, offset); + + // Field offset + __ load_sized_value(offset, in_bytes(ResolvedFieldEntry::field_offset_offset()), cache, sizeof(int), true /*is_signed*/); + + // Flags + __ lbz(flags, in_bytes(ResolvedFieldEntry::flags_offset()), cache); + + if (tos_state != noreg) { + __ lbz(tos_state, in_bytes(ResolvedFieldEntry::type_offset()), cache); + } + + // Klass overwrite register + if (is_static) { + __ ld(obj, in_bytes(ResolvedFieldEntry::field_holder_offset()), cache); + const int mirror_offset = in_bytes(Klass::java_mirror_offset()); + __ ld(obj, mirror_offset, obj); + __ resolve_oop_handle(obj, R11_scratch1, R12_scratch2, MacroAssembler::PRESERVATION_NONE); + } +} + // Load the constant pool cache entry at field accesses into registers. // The Rcache and Rindex registers must be set before call. // Input: @@ -2432,7 +2492,6 @@ void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch, assert_different_registers(Rcache, Rscratch); if (JvmtiExport::can_post_field_access()) { - ByteSize cp_base_offset = ConstantPoolCache::base_offset(); Label Lno_field_access_post; // Check if post field access in enabled. @@ -2443,7 +2502,6 @@ void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch, __ beq(CCR0, Lno_field_access_post); // Post access enabled - do it! - __ addi(Rcache, Rcache, in_bytes(cp_base_offset)); if (is_static) { __ li(R17_tos, 0); } else { @@ -2467,7 +2525,7 @@ void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch, __ verify_oop(R17_tos); } else { // Cache is still needed to get class or obj. 
- __ get_cache_and_index_at_bcp(Rcache, 1); + __ load_field_entry(Rcache, Rscratch); } __ align(32, 12); @@ -2493,9 +2551,10 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr Label Lacquire, Lisync; const Register Rcache = R3_ARG1, - Rclass_or_obj = R22_tmp2, - Roffset = R23_tmp3, - Rflags = R31, + Rclass_or_obj = R22_tmp2, // Needs to survive C call. + Roffset = R23_tmp3, // Needs to survive C call. + Rtos_state = R30, // Needs to survive C call. + Rflags = R31, // Needs to survive C call. Rbtable = R5_ARG3, Rbc = R30, Rscratch = R11_scratch1; // used by load_field_cp_cache_entry @@ -2507,37 +2566,34 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr address* branch_table = (is_static || rc == may_not_rewrite) ? static_branch_table : field_branch_table; // Get field offset. - resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2)); + resolve_cache_and_index_for_field(byte_no, Rcache, Rscratch); // JVMTI support jvmti_post_field_access(Rcache, Rscratch, is_static, false); // Load after possible GC. - load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static); // Uses R11, R12 + load_resolved_field_entry(Rclass_or_obj, Rcache, Rtos_state, Roffset, Rflags, is_static); // Uses R11, R12 // Load pointer to branch table. __ load_const_optimized(Rbtable, (address)branch_table, Rscratch); // Get volatile flag. - __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. + __ rldicl(Rscratch, Rflags, 64-ResolvedFieldEntry::is_volatile_shift, 63); // Extract volatile bit. // Note: sync is needed before volatile load on PPC64. - // Check field type. 
- __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); - #ifdef ASSERT Label LFlagInvalid; - __ cmpldi(CCR0, Rflags, number_of_states); + __ cmpldi(CCR0, Rtos_state, number_of_states); __ bge(CCR0, LFlagInvalid); #endif // Load from branch table and dispatch (volatile case: one instruction ahead). - __ sldi(Rflags, Rflags, LogBytesPerWord); + __ sldi(Rtos_state, Rtos_state, LogBytesPerWord); __ cmpwi(CCR2, Rscratch, 1); // Volatile? if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0. } - __ ldx(Rbtable, Rbtable, Rflags); + __ ldx(Rbtable, Rbtable, Rtos_state); // Get the obj from stack. if (!is_static) { @@ -2753,10 +2809,8 @@ void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, boo __ beq(CCR0, Lno_field_mod_post); // Do the post - ByteSize cp_base_offset = ConstantPoolCache::base_offset(); const Register Robj = Rscratch; - __ addi(Rcache, Rcache, in_bytes(cp_base_offset)); if (is_static) { // Life is simple. Null out the object pointer. __ li(Robj, 0); @@ -2777,17 +2831,16 @@ void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, boo default: { offs = 0; base = Robj; - const Register Rflags = Robj; + const Register Rtos_state = Robj; Label is_one_slot; // Life is harder. The stack holds the value on top, followed by the // object. We don't know the size of the value, though; it could be // one or two words depending on its type. As a result, we must find // the type to determine where the object is. 
- __ ld(Rflags, in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); // Big Endian - __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); + __ lbz(Rtos_state, in_bytes(ResolvedFieldEntry::type_offset()), Rcache); - __ cmpwi(CCR0, Rflags, ltos); - __ cmpwi(CCR1, Rflags, dtos); + __ cmpwi(CCR0, Rtos_state, ltos); + __ cmpwi(CCR1, Rtos_state, dtos); __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(1)); __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal); __ beq(CCR0, is_one_slot); @@ -2802,7 +2855,7 @@ void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, boo __ addi(R6_ARG4, R15_esp, Interpreter::expr_offset_in_bytes(0)); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), Robj, Rcache, R6_ARG4); - __ get_cache_and_index_at_bcp(Rcache, 1); + __ load_field_entry(Rcache, Rscratch); // In case of the fast versions, value lives in registers => put it back on tos. switch(bytecode()) { @@ -2830,7 +2883,8 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod). Rclass_or_obj = R31, // Needs to survive C call. Roffset = R22_tmp2, // Needs to survive C call. - Rflags = R30, + Rtos_state = R23_tmp3, // Needs to survive C call. + Rflags = R30, // Needs to survive C call. Rbtable = R4_ARG2, Rscratch = R11_scratch1, // used by load_field_cp_cache_entry Rscratch2 = R12_scratch2, // used by load_field_cp_cache_entry @@ -2850,32 +2904,29 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr // obj // Load the field offset. 
- resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2)); + resolve_cache_and_index_for_field(byte_no, Rcache, Rscratch); jvmti_post_field_mod(Rcache, Rscratch, is_static); - load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static); // Uses R11, R12 + load_resolved_field_entry(Rclass_or_obj, Rcache, Rtos_state, Roffset, Rflags, is_static); // Uses R11, R12 // Load pointer to branch table. __ load_const_optimized(Rbtable, (address)branch_table, Rscratch); // Get volatile flag. - __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. - - // Check the field type. - __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); + __ rldicl(Rscratch, Rflags, 64-ResolvedFieldEntry::is_volatile_shift, 63); // Extract volatile bit. #ifdef ASSERT Label LFlagInvalid; - __ cmpldi(CCR0, Rflags, number_of_states); + __ cmpldi(CCR0, Rtos_state, number_of_states); __ bge(CCR0, LFlagInvalid); #endif // Load from branch table and dispatch (volatile case: one instruction ahead). - __ sldi(Rflags, Rflags, LogBytesPerWord); + __ sldi(Rtos_state, Rtos_state, LogBytesPerWord); if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpwi(CR_is_vol, Rscratch, 1); // Volatile? } __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile? size of instruction 1 : 0. - __ ldx(Rbtable, Rbtable, Rflags); + __ ldx(Rbtable, Rbtable, Rtos_state); __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point. __ mtctr(Rbtable); @@ -3085,15 +3136,15 @@ void TemplateTable::fast_storefield(TosState state) { const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store). // Constant pool already resolved => Load flags and offset of field. 
- __ get_cache_and_index_at_bcp(Rcache, 1); + __ load_field_entry(Rcache, Rscratch); jvmti_post_field_mod(Rcache, Rscratch, false /* not static */); - load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); // Uses R11, R12 + load_resolved_field_entry(noreg, Rcache, noreg, Roffset, Rflags, false); // Uses R11, R12 // Get the obj and the final store addr. pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. // Get volatile flag. - __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. + __ rldicl_(Rscratch, Rflags, 64-ResolvedFieldEntry::is_volatile_shift, 63); // Extract volatile bit. if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); } { Label LnotVolatile; @@ -3166,8 +3217,8 @@ void TemplateTable::fast_accessfield(TosState state) { // R12_scratch2 used by load_field_cp_cache_entry // Constant pool already resolved. Get the field offset. - __ get_cache_and_index_at_bcp(Rcache, 1); - load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); // Uses R11, R12 + __ load_field_entry(Rcache, Rscratch); + load_resolved_field_entry(noreg, Rcache, noreg, Roffset, Rflags, false); // Uses R11, R12 // JVMTI support jvmti_post_field_access(Rcache, Rscratch, false, true); @@ -3176,7 +3227,7 @@ void TemplateTable::fast_accessfield(TosState state) { __ null_check_throw(Rclass_or_obj, -1, Rscratch); // Get volatile flag. - __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. + __ rldicl_(Rscratch, Rflags, 64-ResolvedFieldEntry::is_volatile_shift, 63); // Extract volatile bit. __ bne(CCR0, LisVolatile); switch(bytecode()) { @@ -3305,8 +3356,8 @@ void TemplateTable::fast_xaccess(TosState state) { __ ld(Rclass_or_obj, 0, R18_locals); // Constant pool already resolved. Get the field offset. 
- __ get_cache_and_index_at_bcp(Rcache, 2); - load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); // Uses R11, R12 + __ load_field_entry(Rcache, Rscratch, 2); + load_resolved_field_entry(noreg, Rcache, noreg, Roffset, Rflags, false); // Uses R11, R12 // JVMTI support not needed, since we switch back to single bytecode as soon as debugger attaches. @@ -3317,7 +3368,7 @@ void TemplateTable::fast_xaccess(TosState state) { __ null_check_throw(Rclass_or_obj, -1, Rscratch); // Get volatile flag. - __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. + __ rldicl_(Rscratch, Rflags, 64-ResolvedFieldEntry::is_volatile_shift, 63); // Extract volatile bit. __ bne(CCR0, LisVolatile); switch(state) { diff --git a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp index 64d0b12b4e348..b8e155ccbba0a 100644 --- a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp +++ b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp @@ -36,6 +36,7 @@ #include "oops/markWord.hpp" #include "oops/method.hpp" #include "oops/methodData.hpp" +#include "oops/resolvedFieldEntry.hpp" #include "oops/resolvedIndyEntry.hpp" #include "prims/jvmtiExport.hpp" #include "prims/jvmtiThreadState.hpp" @@ -1986,13 +1987,30 @@ void InterpreterMacroAssembler::load_resolved_indy_entry(Register cache, Registe get_cache_index_at_bcp(index, cache, 1, sizeof(u4)); // Get address of invokedynamic array ld(cache, Address(xcpool, in_bytes(ConstantPoolCache::invokedynamic_entries_offset()))); - // Scale the index to be the entry index * sizeof(ResolvedInvokeDynamicInfo) + // Scale the index to be the entry index * sizeof(ResolvedIndyEntry) slli(index, index, log2i_exact(sizeof(ResolvedIndyEntry))); add(cache, cache, Array::base_offset_in_bytes()); add(cache, cache, index); la(cache, Address(cache, 0)); } +void InterpreterMacroAssembler::load_field_entry(Register cache, Register index, int bcp_offset) { + // Get index out of 
bytecode pointer + get_cache_index_at_bcp(index, cache, bcp_offset, sizeof(u2)); + // Take shortcut if the size is a power of 2 + if (is_power_of_2(sizeof(ResolvedFieldEntry))) { + slli(index, index, log2i_exact(sizeof(ResolvedFieldEntry))); // Scale index by power of 2 + } else { + mv(cache, sizeof(ResolvedFieldEntry)); + mul(index, index, cache); // Scale the index to be the entry index * sizeof(ResolvedFieldEntry) + } + // Get address of field entries array + ld(cache, Address(xcpool, ConstantPoolCache::field_entries_offset())); + add(cache, cache, Array::base_offset_in_bytes()); + add(cache, cache, index); + la(cache, Address(cache, 0)); +} + void InterpreterMacroAssembler::get_method_counters(Register method, Register mcs, Label& skip) { Label has_counters; diff --git a/src/hotspot/cpu/riscv/interp_masm_riscv.hpp b/src/hotspot/cpu/riscv/interp_masm_riscv.hpp index fbccf2401be67..9b004cb081bfb 100644 --- a/src/hotspot/cpu/riscv/interp_masm_riscv.hpp +++ b/src/hotspot/cpu/riscv/interp_masm_riscv.hpp @@ -300,6 +300,7 @@ class InterpreterMacroAssembler: public MacroAssembler { } void load_resolved_indy_entry(Register cache, Register index); + void load_field_entry(Register cache, Register index, int bcp_offset = 1); #ifdef ASSERT void verify_access_flags(Register access_flags, uint32_t flag, diff --git a/src/hotspot/cpu/riscv/templateTable_riscv.cpp b/src/hotspot/cpu/riscv/templateTable_riscv.cpp index 836a0a06d2cef..87efd3522d91e 100644 --- a/src/hotspot/cpu/riscv/templateTable_riscv.cpp +++ b/src/hotspot/cpu/riscv/templateTable_riscv.cpp @@ -38,6 +38,7 @@ #include "oops/methodData.hpp" #include "oops/objArrayKlass.hpp" #include "oops/oop.inline.hpp" +#include "oops/resolvedFieldEntry.hpp" #include "oops/resolvedIndyEntry.hpp" #include "prims/jvmtiExport.hpp" #include "prims/methodHandles.hpp" @@ -169,7 +170,16 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg, // additional, required work.
assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); assert(load_bc_into_bc_reg, "we use bc_reg as temp"); - __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1); + __ load_field_entry(temp_reg, bc_reg); + if (byte_no == f1_byte) { + __ la(temp_reg, Address(temp_reg, in_bytes(ResolvedFieldEntry::get_code_offset()))); + } else { + __ la(temp_reg, Address(temp_reg, in_bytes(ResolvedFieldEntry::put_code_offset()))); + } + // Load-acquire the bytecode to match store-release in ResolvedFieldEntry::fill_in() + __ membar(MacroAssembler::AnyAny); + __ lbu(temp_reg, Address(temp_reg, 0)); + __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore); __ mv(bc_reg, bc); __ beqz(temp_reg, L_patch_done); break; @@ -2155,11 +2165,6 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Label resolved, clinit_barrier_slow; Bytecodes::Code code = bytecode(); - switch (code) { - case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break; - case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break; - default: break; - } assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size); @@ -2188,6 +2193,71 @@ void TemplateTable::resolve_cache_and_index(int byte_no, } } +void TemplateTable::resolve_cache_and_index_for_field(int byte_no, + Register Rcache, + Register index) { + const Register temp = x9; + assert_different_registers(Rcache, index, temp); + + Label resolved; + + Bytecodes::Code code = bytecode(); + switch (code) { + case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break; + case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break; + default: break; + } + + assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); + __ load_field_entry(Rcache, index); + if (byte_no == f1_byte) { + __ la(temp, Address(Rcache, 
in_bytes(ResolvedFieldEntry::get_code_offset()))); + } else { + __ la(temp, Address(Rcache, in_bytes(ResolvedFieldEntry::put_code_offset()))); + } + // Load-acquire the bytecode to match store-release in ResolvedFieldEntry::fill_in() + __ membar(MacroAssembler::AnyAny); + __ lbu(temp, Address(temp, 0)); + __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore); + __ mv(t0, (int) code); // have we resolved this bytecode? + __ beq(temp, t0, resolved); + + // resolve first time through + address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache); + __ mv(temp, (int) code); + __ call_VM(noreg, entry, temp); + + // Update registers with resolved info + __ load_field_entry(Rcache, index); + __ bind(resolved); +} + +void TemplateTable::load_resolved_field_entry(Register obj, + Register cache, + Register tos_state, + Register offset, + Register flags, + bool is_static = false) { + assert_different_registers(cache, tos_state, flags, offset); + + // Field offset + __ load_sized_value(offset, Address(cache, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/); + + // Flags + __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedFieldEntry::flags_offset()))); + + // TOS state + __ load_unsigned_byte(tos_state, Address(cache, in_bytes(ResolvedFieldEntry::type_offset()))); + + // Klass overwrite register + if (is_static) { + __ ld(obj, Address(cache, ResolvedFieldEntry::field_holder_offset())); + const int mirror_offset = in_bytes(Klass::java_mirror_offset()); + __ ld(obj, Address(obj, mirror_offset)); + __ resolve_oop_handle(obj, x15, t1); + } +} + // The Rcache and index registers must be set before call // n.b unlike x86 cache already includes the index offset void TemplateTable::load_field_cp_cache_entry(Register obj, @@ -2343,8 +2413,7 @@ void TemplateTable::jvmti_post_field_access(Register cache, Register index, __ beqz(x10, L1); - __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1); - __ la(c_rarg2, 
Address(c_rarg2, in_bytes(ConstantPoolCache::base_offset()))); + __ load_field_entry(c_rarg2, index); if (is_static) { __ mv(c_rarg1, zr); // null object reference @@ -2354,11 +2423,10 @@ void TemplateTable::jvmti_post_field_access(Register cache, Register index, } // c_rarg1: object pointer or null // c_rarg2: cache entry pointer - // c_rarg3: jvalue object on the stack __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), - c_rarg1, c_rarg2, c_rarg3); - __ get_cache_and_index_at_bcp(cache, index, 1); + c_rarg1, c_rarg2); + __ load_field_entry(cache, index); __ bind(L1); } } @@ -2370,17 +2438,17 @@ void TemplateTable::pop_and_check_object(Register r) { } void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) { - const Register cache = x12; - const Register index = x13; + const Register cache = x14; const Register obj = x14; + const Register index = x13; + const Register tos_state = x13; const Register off = x9; - const Register flags = x10; - const Register raw_flags = x16; + const Register flags = x16; const Register bc = x14; // uses same reg as obj, so don't mix them - resolve_cache_and_index(byte_no, cache, index, sizeof(u2)); + resolve_cache_and_index_for_field(byte_no, cache, index); jvmti_post_field_access(cache, index, is_static, false); - load_field_cp_cache_entry(obj, cache, index, off, raw_flags, is_static); + load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static); if (!is_static) { // obj is on the stack @@ -2393,12 +2461,8 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr Label Done, notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble; - __ slli(flags, raw_flags, XLEN - (ConstantPoolCacheEntry::tos_state_shift + - ConstantPoolCacheEntry::tos_state_bits)); - __ srli(flags, flags, XLEN - ConstantPoolCacheEntry::tos_state_bits); - assert(btos == 0, "change code, btos != 0"); - __ bnez(flags, notByte); + __ 
bnez(tos_state, notByte); // Don't rewrite getstatic, only getfield if (is_static) { @@ -2415,7 +2479,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ j(Done); __ bind(notByte); - __ sub(t0, flags, (u1)ztos); + __ sub(t0, tos_state, (u1)ztos); __ bnez(t0, notBool); // ztos (same code as btos) @@ -2429,7 +2493,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ j(Done); __ bind(notBool); - __ sub(t0, flags, (u1)atos); + __ sub(t0, tos_state, (u1)atos); __ bnez(t0, notObj); // atos do_oop_load(_masm, field, x10, IN_HEAP); @@ -2440,7 +2504,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ j(Done); __ bind(notObj); - __ sub(t0, flags, (u1)itos); + __ sub(t0, tos_state, (u1)itos); __ bnez(t0, notInt); // itos __ access_load_at(T_INT, IN_HEAP, x10, field, noreg, noreg); @@ -2453,7 +2517,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ j(Done); __ bind(notInt); - __ sub(t0, flags, (u1)ctos); + __ sub(t0, tos_state, (u1)ctos); __ bnez(t0, notChar); // ctos __ access_load_at(T_CHAR, IN_HEAP, x10, field, noreg, noreg); @@ -2465,7 +2529,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ j(Done); __ bind(notChar); - __ sub(t0, flags, (u1)stos); + __ sub(t0, tos_state, (u1)stos); __ bnez(t0, notShort); // stos __ access_load_at(T_SHORT, IN_HEAP, x10, field, noreg, noreg); @@ -2477,7 +2541,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ j(Done); __ bind(notShort); - __ sub(t0, flags, (u1)ltos); + __ sub(t0, tos_state, (u1)ltos); __ bnez(t0, notLong); // ltos __ access_load_at(T_LONG, IN_HEAP, x10, field, noreg, noreg); @@ -2489,7 +2553,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ j(Done); __ bind(notLong); - __ sub(t0, flags, (u1)ftos); + __ sub(t0, tos_state, (u1)ftos); __ bnez(t0, notFloat); // ftos __ 
access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg); @@ -2502,7 +2566,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ bind(notFloat); #ifdef ASSERT - __ sub(t0, flags, (u1)dtos); + __ sub(t0, tos_state, (u1)dtos); __ bnez(t0, notDouble); #endif // dtos @@ -2522,7 +2586,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ bind(Done); Label notVolatile; - __ test_bit(t0, raw_flags, ConstantPoolCacheEntry::is_volatile_shift); + __ test_bit(t0, flags, ResolvedFieldEntry::is_volatile_shift); __ beqz(t0, notVolatile); __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore); __ bind(notVolatile); @@ -2546,8 +2610,6 @@ void TemplateTable::getstatic(int byte_no) void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) { transition(vtos, vtos); - ByteSize cp_base_offset = ConstantPoolCache::base_offset(); - if (JvmtiExport::can_post_field_modification()) { // Check to see if a field modification watch has been set before // we take the time to call into the VM. @@ -2561,7 +2623,7 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is }); __ beqz(x10, L1); - __ get_cache_and_index_at_bcp(c_rarg2, t0, 1); + __ mv(c_rarg2, cache); if (is_static) { // Life is simple. Null out the object pointer. @@ -2571,11 +2633,7 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is // the object. We don't know the size of the value, though; it // could be one or two words depending on its type. As a result, // we must find the type to determine where the object is. 
- __ lwu(c_rarg3, Address(c_rarg2, - in_bytes(cp_base_offset + - ConstantPoolCacheEntry::flags_offset()))); - __ srli(c_rarg3, c_rarg3, ConstantPoolCacheEntry::tos_state_shift); - ConstantPoolCacheEntry::verify_tos_state_shift(); + __ load_unsigned_byte(c_rarg3, Address(c_rarg2, in_bytes(ResolvedFieldEntry::type_offset()))); Label nope2, done, ok; __ ld(c_rarg1, at_tos_p1()); // initially assume a one word jvalue __ sub(t0, c_rarg3, ltos); @@ -2586,8 +2644,6 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is __ ld(c_rarg1, at_tos_p2()); // ltos (two word jvalue); __ bind(nope2); } - // cache entry pointer - __ add(c_rarg2, c_rarg2, in_bytes(cp_base_offset)); // object (tos) __ mv(c_rarg3, esp); // c_rarg1: object pointer set up above (null if static) @@ -2597,7 +2653,7 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), c_rarg1, c_rarg2, c_rarg3); - __ get_cache_and_index_at_bcp(cache, index, 1); + __ load_field_entry(cache, index); __ bind(L1); } } @@ -2605,23 +2661,24 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) { transition(vtos, vtos); - const Register cache = x12; - const Register index = x13; - const Register obj = x12; - const Register off = x9; - const Register flags = x10; - const Register bc = x14; + const Register cache = x12; + const Register index = x13; + const Register tos_state = x13; + const Register obj = x12; + const Register off = x9; + const Register flags = x10; + const Register bc = x14; - resolve_cache_and_index(byte_no, cache, index, sizeof(u2)); + resolve_cache_and_index_for_field(byte_no, cache, index); jvmti_post_field_mod(cache, index, is_static); - load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); + load_resolved_field_entry(obj, cache, tos_state, off, flags, 
is_static); Label Done; __ mv(x15, flags); { Label notVolatile; - __ test_bit(t0, x15, ConstantPoolCacheEntry::is_volatile_shift); + __ test_bit(t0, x15, ResolvedFieldEntry::is_volatile_shift); __ beqz(t0, notVolatile); __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore); __ bind(notVolatile); @@ -2630,12 +2687,8 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr Label notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble; - __ slli(flags, flags, XLEN - (ConstantPoolCacheEntry::tos_state_shift + - ConstantPoolCacheEntry::tos_state_bits)); - __ srli(flags, flags, XLEN - ConstantPoolCacheEntry::tos_state_bits); - assert(btos == 0, "change code, btos != 0"); - __ bnez(flags, notByte); + __ bnez(tos_state, notByte); // Don't rewrite putstatic, only putfield if (is_static) { @@ -2659,7 +2712,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr } __ bind(notByte); - __ sub(t0, flags, (u1)ztos); + __ sub(t0, tos_state, (u1)ztos); __ bnez(t0, notBool); // ztos @@ -2679,7 +2732,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr } __ bind(notBool); - __ sub(t0, flags, (u1)atos); + __ sub(t0, tos_state, (u1)atos); __ bnez(t0, notObj); // atos @@ -2700,7 +2753,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr } __ bind(notObj); - __ sub(t0, flags, (u1)itos); + __ sub(t0, tos_state, (u1)itos); __ bnez(t0, notInt); // itos @@ -2720,7 +2773,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr } __ bind(notInt); - __ sub(t0, flags, (u1)ctos); + __ sub(t0, tos_state, (u1)ctos); __ bnez(t0, notChar); // ctos @@ -2740,7 +2793,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr } __ bind(notChar); - __ sub(t0, flags, (u1)stos); + __ sub(t0, tos_state, (u1)stos); __ bnez(t0, notShort); // stos @@ -2760,7 +2813,7 @@ void 
TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr } __ bind(notShort); - __ sub(t0, flags, (u1)ltos); + __ sub(t0, tos_state, (u1)ltos); __ bnez(t0, notLong); // ltos @@ -2780,7 +2833,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr } __ bind(notLong); - __ sub(t0, flags, (u1)ftos); + __ sub(t0, tos_state, (u1)ftos); __ bnez(t0, notFloat); // ftos @@ -2801,7 +2854,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr __ bind(notFloat); #ifdef ASSERT - __ sub(t0, flags, (u1)dtos); + __ sub(t0, tos_state, (u1)dtos); __ bnez(t0, notDouble); #endif @@ -2831,7 +2884,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr { Label notVolatile; - __ test_bit(t0, x15, ConstantPoolCacheEntry::is_volatile_shift); + __ test_bit(t0, x15, ResolvedFieldEntry::is_volatile_shift); __ beqz(t0, notVolatile); __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore); __ bind(notVolatile); @@ -2884,7 +2937,7 @@ void TemplateTable::jvmti_post_fast_field_mod() { } __ mv(c_rarg3, esp); // points to jvalue on the stack // access constant pool cache entry - __ get_cache_entry_pointer_at_bcp(c_rarg2, x10, 1); + __ load_field_entry(c_rarg2, x10); __ verify_oop(x9); // x9: object pointer copied above // c_rarg2: cache entry pointer @@ -2918,21 +2971,18 @@ void TemplateTable::fast_storefield(TosState state) { jvmti_post_fast_field_mod(); // access constant pool cache - __ get_cache_and_index_at_bcp(x12, x11, 1); + __ load_field_entry(x12, x11); + __ push_reg(x10); + // X11: field offset, X12: TOS, X13: flags + load_resolved_field_entry(x12, x12, x10, x11, x13); + __ pop_reg(x10); // Must prevent reordering of the following cp cache loads with bytecode load __ membar(MacroAssembler::LoadLoad); - // test for volatile with x13 - __ lwu(x13, Address(x12, in_bytes(base + - ConstantPoolCacheEntry::flags_offset()))); - - // replace index with field offset from cache 
entry - __ ld(x11, Address(x12, in_bytes(base + ConstantPoolCacheEntry::f2_offset()))); - { Label notVolatile; - __ test_bit(t0, x13, ConstantPoolCacheEntry::is_volatile_shift); + __ test_bit(t0, x13, ResolvedFieldEntry::is_volatile_shift); __ beqz(t0, notVolatile); __ membar(MacroAssembler::StoreStore | MacroAssembler::LoadStore); __ bind(notVolatile); @@ -2980,7 +3030,7 @@ void TemplateTable::fast_storefield(TosState state) { { Label notVolatile; - __ test_bit(t0, x13, ConstantPoolCacheEntry::is_volatile_shift); + __ test_bit(t0, x13, ResolvedFieldEntry::is_volatile_shift); __ beqz(t0, notVolatile); __ membar(MacroAssembler::StoreLoad | MacroAssembler::StoreStore); __ bind(notVolatile); @@ -3002,7 +3052,7 @@ void TemplateTable::fast_accessfield(TosState state) { }); __ beqz(x12, L1); // access constant pool cache entry - __ get_cache_entry_pointer_at_bcp(c_rarg2, t1, 1); + __ load_field_entry(c_rarg2, t1); __ verify_oop(x10); __ push_ptr(x10); // save object pointer before call_VM() clobbers it __ mv(c_rarg1, x10); @@ -3017,15 +3067,13 @@ void TemplateTable::fast_accessfield(TosState state) { } // access constant pool cache - __ get_cache_and_index_at_bcp(x12, x11, 1); + __ load_field_entry(x12, x11); // Must prevent reordering of the following cp cache loads with bytecode load __ membar(MacroAssembler::LoadLoad); - __ ld(x11, Address(x12, in_bytes(ConstantPoolCache::base_offset() + - ConstantPoolCacheEntry::f2_offset()))); - __ lwu(x13, Address(x12, in_bytes(ConstantPoolCache::base_offset() + - ConstantPoolCacheEntry::flags_offset()))); + __ load_sized_value(x11, Address(x12, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/); + __ load_unsigned_byte(x13, Address(x12, in_bytes(ResolvedFieldEntry::flags_offset()))); // x10: object __ verify_oop(x10); @@ -3066,7 +3114,7 @@ void TemplateTable::fast_accessfield(TosState state) { } { Label notVolatile; - __ test_bit(t0, x13, ConstantPoolCacheEntry::is_volatile_shift); + __ 
test_bit(t0, x13, ResolvedFieldEntry::is_volatile_shift); __ beqz(t0, notVolatile); __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore); __ bind(notVolatile); @@ -3079,9 +3127,8 @@ void TemplateTable::fast_xaccess(TosState state) { // get receiver __ ld(x10, aaddress(0)); // access constant pool cache - __ get_cache_and_index_at_bcp(x12, x13, 2); - __ ld(x11, Address(x12, in_bytes(ConstantPoolCache::base_offset() + - ConstantPoolCacheEntry::f2_offset()))); + __ load_field_entry(x12, x13, 2); + __ load_sized_value(x11, Address(x12, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/); // make sure exception is reported in correct bcp range (getfield is // next instruction) @@ -3108,9 +3155,8 @@ void TemplateTable::fast_xaccess(TosState state) { { Label notVolatile; - __ lwu(x13, Address(x12, in_bytes(ConstantPoolCache::base_offset() + - ConstantPoolCacheEntry::flags_offset()))); - __ test_bit(t0, x13, ConstantPoolCacheEntry::is_volatile_shift); + __ load_unsigned_byte(x13, Address(x12, in_bytes(ResolvedFieldEntry::flags_offset()))); + __ test_bit(t0, x13, ResolvedFieldEntry::is_volatile_shift); __ beqz(t0, notVolatile); __ membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore); __ bind(notVolatile); diff --git a/src/hotspot/cpu/x86/interp_masm_x86.cpp b/src/hotspot/cpu/x86/interp_masm_x86.cpp index f51ed64c47e64..caa07857f03dc 100644 --- a/src/hotspot/cpu/x86/interp_masm_x86.cpp +++ b/src/hotspot/cpu/x86/interp_masm_x86.cpp @@ -32,6 +32,7 @@ #include "oops/markWord.hpp" #include "oops/methodData.hpp" #include "oops/method.hpp" +#include "oops/resolvedFieldEntry.hpp" #include "oops/resolvedIndyEntry.hpp" #include "prims/jvmtiExport.hpp" #include "prims/jvmtiThreadState.hpp" @@ -2115,7 +2116,22 @@ void InterpreterMacroAssembler::load_resolved_indy_entry(Register cache, Registe if (is_power_of_2(sizeof(ResolvedIndyEntry))) { shll(index, log2i_exact(sizeof(ResolvedIndyEntry))); // Scale index by power of 2 } else { - 
imull(index, index, sizeof(ResolvedIndyEntry)); // Scale the index to be the entry index * sizeof(ResolvedInvokeDynamicInfo) + imull(index, index, sizeof(ResolvedIndyEntry)); // Scale the index to be the entry index * sizeof(ResolvedIndyEntry) } lea(cache, Address(cache, index, Address::times_1, Array::base_offset_in_bytes())); } + +void InterpreterMacroAssembler::load_field_entry(Register cache, Register index, int bcp_offset) { + // Get index out of bytecode pointer + movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize)); + get_cache_index_at_bcp(index, bcp_offset, sizeof(u2)); + + movptr(cache, Address(cache, ConstantPoolCache::field_entries_offset())); + // Take shortcut if the size is a power of 2 + if (is_power_of_2(sizeof(ResolvedFieldEntry))) { + shll(index, log2i_exact(sizeof(ResolvedFieldEntry))); // Scale index by power of 2 + } else { + imull(index, index, sizeof(ResolvedFieldEntry)); // Scale the index to be the entry index * sizeof(ResolvedFieldEntry) + } + lea(cache, Address(cache, index, Address::times_1, Array::base_offset_in_bytes())); +} diff --git a/src/hotspot/cpu/x86/interp_masm_x86.hpp b/src/hotspot/cpu/x86/interp_masm_x86.hpp index 16d003cacf37c..4adddc407c873 100644 --- a/src/hotspot/cpu/x86/interp_masm_x86.hpp +++ b/src/hotspot/cpu/x86/interp_masm_x86.hpp @@ -307,7 +307,7 @@ class InterpreterMacroAssembler: public MacroAssembler { void profile_parameters_type(Register mdp, Register tmp1, Register tmp2); void load_resolved_indy_entry(Register cache, Register index); - + void load_field_entry(Register cache, Register index, int bcp_offset = 1); }; #endif // CPU_X86_INTERP_MASM_X86_HPP diff --git a/src/hotspot/cpu/x86/templateTable_x86.cpp b/src/hotspot/cpu/x86/templateTable_x86.cpp index 61e1e1bd50c71..7eab81f90953b 100644 --- a/src/hotspot/cpu/x86/templateTable_x86.cpp +++ b/src/hotspot/cpu/x86/templateTable_x86.cpp @@ -36,6 +36,7 @@ #include "oops/methodData.hpp" #include "oops/objArrayKlass.hpp" #include 
"oops/oop.inline.hpp" +#include "oops/resolvedFieldEntry.hpp" #include "oops/resolvedIndyEntry.hpp" #include "prims/jvmtiExport.hpp" #include "prims/methodHandles.hpp" @@ -197,7 +198,13 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg, // additional, required work. assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); assert(load_bc_into_bc_reg, "we use bc_reg as temp"); - __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1); + __ load_field_entry(temp_reg, bc_reg); + if (byte_no == f1_byte) { + __ load_unsigned_byte(temp_reg, Address(temp_reg, in_bytes(ResolvedFieldEntry::get_code_offset()))); + } else { + __ load_unsigned_byte(temp_reg, Address(temp_reg, in_bytes(ResolvedFieldEntry::put_code_offset()))); + } + __ movl(bc_reg, bc); __ cmpl(temp_reg, (int) 0); __ jcc(Assembler::zero, L_patch_done); // don't patch @@ -2656,11 +2663,6 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Label resolved; Bytecodes::Code code = bytecode(); - switch (code) { - case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break; - case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break; - default: break; - } assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); __ get_cache_and_index_and_bytecode_at_bcp(cache, index, temp, byte_no, 1, index_size); @@ -2691,6 +2693,68 @@ void TemplateTable::resolve_cache_and_index(int byte_no, } } +void TemplateTable::resolve_cache_and_index_for_field(int byte_no, + Register cache, + Register index) { + const Register temp = rbx; + assert_different_registers(cache, index, temp); + + Label resolved; + + Bytecodes::Code code = bytecode(); + switch (code) { + case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break; + case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break; + default: break; + } + + assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); + __ load_field_entry(cache, 
index); + if (byte_no == f1_byte) { + __ load_unsigned_byte(temp, Address(cache, in_bytes(ResolvedFieldEntry::get_code_offset()))); + } else { + __ load_unsigned_byte(temp, Address(cache, in_bytes(ResolvedFieldEntry::put_code_offset()))); + } + __ cmpl(temp, code); // have we resolved this bytecode? + __ jcc(Assembler::equal, resolved); + + // resolve first time through + address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache); + __ movl(temp, code); + __ call_VM(noreg, entry, temp); + // Update registers with resolved info + __ load_field_entry(cache, index); + + __ bind(resolved); +} + +void TemplateTable::load_resolved_field_entry(Register obj, + Register cache, + Register tos_state, + Register offset, + Register flags, + bool is_static = false) { + assert_different_registers(cache, tos_state, flags, offset); + + // Field offset + __ load_sized_value(offset, Address(cache, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/); + + // Flags + __ load_unsigned_byte(flags, Address(cache, in_bytes(ResolvedFieldEntry::flags_offset()))); + + // TOS state + __ load_unsigned_byte(tos_state, Address(cache, in_bytes(ResolvedFieldEntry::type_offset()))); + + // Klass overwrite register + if (is_static) { + __ movptr(obj, Address(cache, ResolvedFieldEntry::field_holder_offset())); + const int mirror_offset = in_bytes(Klass::java_mirror_offset()); + __ movptr(obj, Address(obj, mirror_offset)); + __ resolve_oop_handle(obj, rscratch2); + } + +} + // The cache and index registers must be set before call void TemplateTable::load_field_cp_cache_entry(Register obj, Register cache, @@ -2838,9 +2902,7 @@ void TemplateTable::jvmti_post_field_access(Register cache, __ jcc(Assembler::zero, L1); // cache entry pointer - __ addptr(cache, in_bytes(ConstantPoolCache::base_offset())); - __ shll(index, LogBytesPerWord); - __ addptr(cache, index); + __ load_field_entry(cache, index); if (is_static) { __ xorptr(rax, rax); // null 
object reference } else { @@ -2851,8 +2913,9 @@ void TemplateTable::jvmti_post_field_access(Register cache, // rax,: object pointer or null // cache: cache entry pointer __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), - rax, cache); - __ get_cache_and_index_at_bcp(cache, index, 1); + rax, cache); + + __ load_field_entry(cache, index); __ bind(L1); } } @@ -2866,16 +2929,17 @@ void TemplateTable::pop_and_check_object(Register r) { void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) { transition(vtos, vtos); + const Register obj = LP64_ONLY(c_rarg3) NOT_LP64(rcx); const Register cache = rcx; const Register index = rdx; - const Register obj = LP64_ONLY(c_rarg3) NOT_LP64(rcx); const Register off = rbx; - const Register flags = rax; + const Register tos_state = rax; + const Register flags = rdx; const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // uses same reg as obj, so don't mix them - resolve_cache_and_index(byte_no, cache, index, sizeof(u2)); + resolve_cache_and_index_for_field(byte_no, cache, index); jvmti_post_field_access(cache, index, is_static, false); - load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); + load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static); if (!is_static) pop_and_check_object(obj); @@ -2883,13 +2947,11 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr Label Done, notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj; - __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift); // Make sure we don't need to mask edx after the above shift assert(btos == 0, "change code, btos != 0"); - - __ andl(flags, ConstantPoolCacheEntry::tos_state_mask); - + __ testl(tos_state, tos_state); __ jcc(Assembler::notZero, notByte); + // btos __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg); __ push(btos); @@ -2900,7 +2962,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool 
is_static, RewriteContr __ jmp(Done); __ bind(notByte); - __ cmpl(flags, ztos); + __ cmpl(tos_state, ztos); __ jcc(Assembler::notEqual, notBool); // ztos (same code as btos) @@ -2914,7 +2976,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ jmp(Done); __ bind(notBool); - __ cmpl(flags, atos); + __ cmpl(tos_state, atos); __ jcc(Assembler::notEqual, notObj); // atos do_oop_load(_masm, field, rax); @@ -2925,7 +2987,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ jmp(Done); __ bind(notObj); - __ cmpl(flags, itos); + __ cmpl(tos_state, itos); __ jcc(Assembler::notEqual, notInt); // itos __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg); @@ -2937,7 +2999,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ jmp(Done); __ bind(notInt); - __ cmpl(flags, ctos); + __ cmpl(tos_state, ctos); __ jcc(Assembler::notEqual, notChar); // ctos __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg); @@ -2949,7 +3011,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ jmp(Done); __ bind(notChar); - __ cmpl(flags, stos); + __ cmpl(tos_state, stos); __ jcc(Assembler::notEqual, notShort); // stos __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg); @@ -2961,7 +3023,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ jmp(Done); __ bind(notShort); - __ cmpl(flags, ltos); + __ cmpl(tos_state, ltos); __ jcc(Assembler::notEqual, notLong); // ltos // Generate code as if volatile (x86_32). 
There just aren't enough registers to @@ -2973,7 +3035,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ jmp(Done); __ bind(notLong); - __ cmpl(flags, ftos); + __ cmpl(tos_state, ftos); __ jcc(Assembler::notEqual, notFloat); // ftos @@ -2988,7 +3050,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr __ bind(notFloat); #ifdef ASSERT Label notDouble; - __ cmpl(flags, dtos); + __ cmpl(tos_state, dtos); __ jcc(Assembler::notEqual, notDouble); #endif // dtos @@ -3028,29 +3090,25 @@ void TemplateTable::getstatic(int byte_no) { // The registers cache and index expected to be set before call. // The function may destroy various registers, just not the cache and index registers. void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) { - - const Register robj = LP64_ONLY(c_rarg2) NOT_LP64(rax); - const Register RBX = LP64_ONLY(c_rarg1) NOT_LP64(rbx); - const Register RCX = LP64_ONLY(c_rarg3) NOT_LP64(rcx); - const Register RDX = LP64_ONLY(rscratch1) NOT_LP64(rdx); - - ByteSize cp_base_offset = ConstantPoolCache::base_offset(); + // Cache is rcx and index is rdx + const Register entry = LP64_ONLY(c_rarg2) NOT_LP64(rax); // ResolvedFieldEntry + const Register obj = LP64_ONLY(c_rarg1) NOT_LP64(rbx); // Object pointer + const Register value = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // JValue object if (JvmtiExport::can_post_field_modification()) { // Check to see if a field modification watch has been set before // we take the time to call into the VM. Label L1; - assert_different_registers(cache, index, rax); + assert_different_registers(cache, obj, rax); __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr())); __ testl(rax, rax); __ jcc(Assembler::zero, L1); - __ get_cache_and_index_at_bcp(robj, RDX, 1); - + __ mov(entry, cache); if (is_static) { // Life is simple. Null out the object pointer. 
- __ xorl(RBX, RBX); + __ xorl(obj, obj); } else { // Life is harder. The stack holds the value on top, followed by @@ -3060,53 +3118,44 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is #ifndef _LP64 Label two_word, valsize_known; #endif - __ movl(RCX, Address(robj, RDX, - Address::times_ptr, - in_bytes(cp_base_offset + - ConstantPoolCacheEntry::flags_offset()))); - NOT_LP64(__ mov(rbx, rsp)); - __ shrl(RCX, ConstantPoolCacheEntry::tos_state_shift); - - // Make sure we don't need to mask rcx after the above shift - ConstantPoolCacheEntry::verify_tos_state_shift(); + __ load_unsigned_byte(value, Address(entry, in_bytes(ResolvedFieldEntry::type_offset()))); #ifdef _LP64 - __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue - __ cmpl(c_rarg3, ltos); + __ movptr(obj, at_tos_p1()); // initially assume a one word jvalue + __ cmpl(value, ltos); __ cmovptr(Assembler::equal, - c_rarg1, at_tos_p2()); // ltos (two word jvalue) - __ cmpl(c_rarg3, dtos); + obj, at_tos_p2()); // ltos (two word jvalue) + __ cmpl(value, dtos); __ cmovptr(Assembler::equal, - c_rarg1, at_tos_p2()); // dtos (two word jvalue) + obj, at_tos_p2()); // dtos (two word jvalue) #else - __ cmpl(rcx, ltos); + __ mov(obj, rsp); + __ cmpl(value, ltos); __ jccb(Assembler::equal, two_word); - __ cmpl(rcx, dtos); + __ cmpl(value, dtos); __ jccb(Assembler::equal, two_word); - __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos) + __ addptr(obj, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos) __ jmpb(valsize_known); __ bind(two_word); - __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue + __ addptr(obj, Interpreter::expr_offset_in_bytes(2)); // two words jvalue __ bind(valsize_known); // setup object pointer - __ movptr(rbx, Address(rbx, 0)); + __ movptr(obj, Address(obj, 0)); #endif } - // cache entry pointer - __ addptr(robj, in_bytes(cp_base_offset)); - __ shll(RDX, 
LogBytesPerWord); - __ addptr(robj, RDX); + // object (tos) - __ mov(RCX, rsp); - // c_rarg1: object pointer set up above (null if static) - // c_rarg2: cache entry pointer - // c_rarg3: jvalue object on the stack + __ mov(value, rsp); + // obj: object pointer set up above (null if static) + // cache: field entry pointer + // value: jvalue object on the stack __ call_VM(noreg, - CAST_FROM_FN_PTR(address, - InterpreterRuntime::post_field_modification), - RBX, robj, RCX); - __ get_cache_and_index_at_bcp(cache, index, 1); + CAST_FROM_FN_PTR(address, + InterpreterRuntime::post_field_modification), + obj, entry, value); + // Reload field entry + __ load_field_entry(cache, index); __ bind(L1); } } @@ -3114,42 +3163,41 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) { transition(vtos, vtos); + const Register obj = rcx; const Register cache = rcx; const Register index = rdx; - const Register obj = rcx; + const Register tos_state = rdx; const Register off = rbx; const Register flags = rax; - resolve_cache_and_index(byte_no, cache, index, sizeof(u2)); + resolve_cache_and_index_for_field(byte_no, cache, index); jvmti_post_field_mod(cache, index, is_static); - load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); + load_resolved_field_entry(obj, cache, tos_state, off, flags, is_static); // [jk] not needed currently // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore | // Assembler::StoreStore)); Label notVolatile, Done; - __ movl(rdx, flags); - __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); - __ andl(rdx, 0x1); // Check for volatile store - __ testl(rdx, rdx); + __ andl(flags, (1 << ResolvedFieldEntry::is_volatile_shift)); + __ testl(flags, flags); __ jcc(Assembler::zero, notVolatile); - putfield_or_static_helper(byte_no, is_static, rc, obj, off, flags); + putfield_or_static_helper(byte_no, is_static, rc, obj, off, 
tos_state); volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad | Assembler::StoreStore)); __ jmp(Done); __ bind(notVolatile); - putfield_or_static_helper(byte_no, is_static, rc, obj, off, flags); + putfield_or_static_helper(byte_no, is_static, rc, obj, off, tos_state); __ bind(Done); } void TemplateTable::putfield_or_static_helper(int byte_no, bool is_static, RewriteControl rc, - Register obj, Register off, Register flags) { + Register obj, Register off, Register tos_state) { // field addresses const Address field(obj, off, Address::times_1, 0*wordSize); @@ -3161,10 +3209,8 @@ void TemplateTable::putfield_or_static_helper(int byte_no, bool is_static, Rewri const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx); - __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift); - - assert(btos == 0, "change code, btos != 0"); - __ andl(flags, ConstantPoolCacheEntry::tos_state_mask); + // Test TOS state + __ testl(tos_state, tos_state); __ jcc(Assembler::notZero, notByte); // btos @@ -3179,7 +3225,7 @@ void TemplateTable::putfield_or_static_helper(int byte_no, bool is_static, Rewri } __ bind(notByte); - __ cmpl(flags, ztos); + __ cmpl(tos_state, ztos); __ jcc(Assembler::notEqual, notBool); // ztos @@ -3194,7 +3240,7 @@ void TemplateTable::putfield_or_static_helper(int byte_no, bool is_static, Rewri } __ bind(notBool); - __ cmpl(flags, atos); + __ cmpl(tos_state, atos); __ jcc(Assembler::notEqual, notObj); // atos @@ -3210,7 +3256,7 @@ void TemplateTable::putfield_or_static_helper(int byte_no, bool is_static, Rewri } __ bind(notObj); - __ cmpl(flags, itos); + __ cmpl(tos_state, itos); __ jcc(Assembler::notEqual, notInt); // itos @@ -3225,7 +3271,7 @@ void TemplateTable::putfield_or_static_helper(int byte_no, bool is_static, Rewri } __ bind(notInt); - __ cmpl(flags, ctos); + __ cmpl(tos_state, ctos); __ jcc(Assembler::notEqual, notChar); // ctos @@ -3240,7 +3286,7 @@ void TemplateTable::putfield_or_static_helper(int byte_no, bool is_static, Rewri } __ 
bind(notChar); - __ cmpl(flags, stos); + __ cmpl(tos_state, stos); __ jcc(Assembler::notEqual, notShort); // stos @@ -3255,7 +3301,7 @@ void TemplateTable::putfield_or_static_helper(int byte_no, bool is_static, Rewri } __ bind(notShort); - __ cmpl(flags, ltos); + __ cmpl(tos_state, ltos); __ jcc(Assembler::notEqual, notLong); // ltos @@ -3273,7 +3319,7 @@ void TemplateTable::putfield_or_static_helper(int byte_no, bool is_static, Rewri } __ bind(notLong); - __ cmpl(flags, ftos); + __ cmpl(tos_state, ftos); __ jcc(Assembler::notEqual, notFloat); // ftos @@ -3290,7 +3336,7 @@ void TemplateTable::putfield_or_static_helper(int byte_no, bool is_static, Rewri __ bind(notFloat); #ifdef ASSERT Label notDouble; - __ cmpl(flags, dtos); + __ cmpl(tos_state, dtos); __ jcc(Assembler::notEqual, notDouble); #endif @@ -3360,8 +3406,8 @@ void TemplateTable::jvmti_post_fast_field_mod() { } __ mov(scratch, rsp); // points to jvalue on the stack // access constant pool cache entry - LP64_ONLY(__ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1)); - NOT_LP64(__ get_cache_entry_pointer_at_bcp(rax, rdx, 1)); + LP64_ONLY(__ load_field_entry(c_rarg2, rax)); + NOT_LP64(__ load_field_entry(rax, rdx)); __ verify_oop(rbx); // rbx: object pointer copied above // c_rarg2: cache entry pointer @@ -3388,29 +3434,18 @@ void TemplateTable::jvmti_post_fast_field_mod() { void TemplateTable::fast_storefield(TosState state) { transition(state, vtos); - ByteSize base = ConstantPoolCache::base_offset(); - - jvmti_post_fast_field_mod(); + Register cache = rcx; - // access constant pool cache - __ get_cache_and_index_at_bcp(rcx, rbx, 1); - - // test for volatile with rdx but rdx is tos register for lputfield. 
- __ movl(rdx, Address(rcx, rbx, Address::times_ptr, - in_bytes(base + - ConstantPoolCacheEntry::flags_offset()))); - - // replace index with field offset from cache entry - __ movptr(rbx, Address(rcx, rbx, Address::times_ptr, - in_bytes(base + ConstantPoolCacheEntry::f2_offset()))); + Label notVolatile, Done; - // [jk] not needed currently - // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore | - // Assembler::StoreStore)); + jvmti_post_fast_field_mod(); - Label notVolatile, Done; - __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); - __ andl(rdx, 0x1); + __ push(rax); + __ load_field_entry(rcx, rax); + load_resolved_field_entry(noreg, cache, rax, rbx, rdx); + // RBX: field offset, RCX: RAX: TOS, RDX: flags + __ andl(rdx, (1 << ResolvedFieldEntry::is_volatile_shift)); + __ pop(rax); // Get object from stack pop_and_check_object(rcx); @@ -3485,8 +3520,8 @@ void TemplateTable::fast_accessfield(TosState state) { __ testl(rcx, rcx); __ jcc(Assembler::zero, L1); // access constant pool cache entry - LP64_ONLY(__ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1)); - NOT_LP64(__ get_cache_entry_pointer_at_bcp(rcx, rdx, 1)); + LP64_ONLY(__ load_field_entry(c_rarg2, rcx)); + NOT_LP64(__ load_field_entry(rcx, rdx)); __ verify_oop(rax); __ push_ptr(rax); // save object pointer before call_VM() clobbers it LP64_ONLY(__ mov(c_rarg1, rax)); @@ -3499,18 +3534,8 @@ void TemplateTable::fast_accessfield(TosState state) { } // access constant pool cache - __ get_cache_and_index_at_bcp(rcx, rbx, 1); - // replace index with field offset from cache entry - // [jk] not needed currently - // __ movl(rdx, Address(rcx, rbx, Address::times_8, - // in_bytes(ConstantPoolCache::base_offset() + - // ConstantPoolCacheEntry::flags_offset()))); - // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); - // __ andl(rdx, 0x1); - // - __ movptr(rbx, Address(rcx, rbx, Address::times_ptr, - in_bytes(ConstantPoolCache::base_offset() + - ConstantPoolCacheEntry::f2_offset()))); + 
__ load_field_entry(rcx, rbx); + __ load_sized_value(rbx, Address(rcx, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/); // rax: object __ verify_oop(rax); @@ -3565,11 +3590,9 @@ void TemplateTable::fast_xaccess(TosState state) { // get receiver __ movptr(rax, aaddress(0)); // access constant pool cache - __ get_cache_and_index_at_bcp(rcx, rdx, 2); - __ movptr(rbx, - Address(rcx, rdx, Address::times_ptr, - in_bytes(ConstantPoolCache::base_offset() + - ConstantPoolCacheEntry::f2_offset()))); + __ load_field_entry(rcx, rdx, 2); + __ load_sized_value(rbx, Address(rcx, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/); + // make sure exception is reported in correct bcp range (getfield is // next instruction) __ increment(rbcp); diff --git a/src/hotspot/cpu/zero/zeroInterpreter_zero.cpp b/src/hotspot/cpu/zero/zeroInterpreter_zero.cpp index d92b43f0ea54e..9e00b1c5b0462 100644 --- a/src/hotspot/cpu/zero/zeroInterpreter_zero.cpp +++ b/src/hotspot/cpu/zero/zeroInterpreter_zero.cpp @@ -605,7 +605,7 @@ int ZeroInterpreter::getter_entry(Method* method, intptr_t UNUSED, TRAPS) { // Get the entry from the constant pool cache, and drop into // the slow path if it has not been resolved ConstantPoolCache* cache = method->constants()->cache(); - ConstantPoolCacheEntry* entry = cache->entry_at(index); + ResolvedFieldEntry* entry = cache->resolved_field_entry_at(index); if (!entry->is_resolved(Bytecodes::_getfield)) { return normal_entry(method, 0, THREAD); } @@ -622,7 +622,7 @@ int ZeroInterpreter::getter_entry(Method* method, intptr_t UNUSED, TRAPS) { // If needed, allocate additional slot on stack: we already have one // for receiver, and double/long need another one. 
- switch (entry->flag_state()) { + switch (entry->tos_state()) { case ltos: case dtos: stack->overflow_check(1, CHECK_0); @@ -634,12 +634,12 @@ int ZeroInterpreter::getter_entry(Method* method, intptr_t UNUSED, TRAPS) { } // Read the field to stack(0) - int offset = entry->f2_as_index(); + int offset = entry->field_offset(); if (entry->is_volatile()) { if (support_IRIW_for_not_multiple_copy_atomic_cpu) { OrderAccess::fence(); } - switch (entry->flag_state()) { + switch (entry->tos_state()) { case btos: case ztos: SET_STACK_INT(object->byte_field_acquire(offset), 0); break; case ctos: SET_STACK_INT(object->char_field_acquire(offset), 0); break; @@ -653,7 +653,7 @@ int ZeroInterpreter::getter_entry(Method* method, intptr_t UNUSED, TRAPS) { ShouldNotReachHere(); } } else { - switch (entry->flag_state()) { + switch (entry->tos_state()) { case btos: case ztos: SET_STACK_INT(object->byte_field(offset), 0); break; case ctos: SET_STACK_INT(object->char_field(offset), 0); break; @@ -696,7 +696,7 @@ int ZeroInterpreter::setter_entry(Method* method, intptr_t UNUSED, TRAPS) { // Get the entry from the constant pool cache, and drop into // the slow path if it has not been resolved ConstantPoolCache* cache = method->constants()->cache(); - ConstantPoolCacheEntry* entry = cache->entry_at(index); + ResolvedFieldEntry* entry = cache->resolved_field_entry_at(index); if (!entry->is_resolved(Bytecodes::_putfield)) { return normal_entry(method, 0, THREAD); } @@ -707,7 +707,7 @@ int ZeroInterpreter::setter_entry(Method* method, intptr_t UNUSED, TRAPS) { // Figure out where the receiver is. If there is a long/double // operand on stack top, then receiver is two slots down. 
oop object = nullptr; - switch (entry->flag_state()) { + switch (entry->tos_state()) { case ltos: case dtos: object = STACK_OBJECT(-2); @@ -724,9 +724,9 @@ int ZeroInterpreter::setter_entry(Method* method, intptr_t UNUSED, TRAPS) { } // Store the stack(0) to field - int offset = entry->f2_as_index(); + int offset = entry->field_offset(); if (entry->is_volatile()) { - switch (entry->flag_state()) { + switch (entry->tos_state()) { case btos: object->release_byte_field_put(offset, STACK_INT(0)); break; case ztos: object->release_byte_field_put(offset, STACK_INT(0) & 1); break; // only store LSB case ctos: object->release_char_field_put(offset, STACK_INT(0)); break; @@ -741,7 +741,7 @@ int ZeroInterpreter::setter_entry(Method* method, intptr_t UNUSED, TRAPS) { } OrderAccess::storeload(); } else { - switch (entry->flag_state()) { + switch (entry->tos_state()) { case btos: object->byte_field_put(offset, STACK_INT(0)); break; case ztos: object->byte_field_put(offset, STACK_INT(0) & 1); break; // only store LSB case ctos: object->char_field_put(offset, STACK_INT(0)); break; diff --git a/src/hotspot/share/ci/ciStreams.cpp b/src/hotspot/share/ci/ciStreams.cpp index 9d88638f9e447..0348eb033ad4c 100644 --- a/src/hotspot/share/ci/ciStreams.cpp +++ b/src/hotspot/share/ci/ciStreams.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -298,7 +298,7 @@ int ciBytecodeStream::get_field_index() { cur_bc() == Bytecodes::_putfield || cur_bc() == Bytecodes::_getstatic || cur_bc() == Bytecodes::_putstatic, "wrong bc"); - return get_index_u2_cpcache(); + return get_index_u2(); } diff --git a/src/hotspot/share/interpreter/bytecode.cpp b/src/hotspot/share/interpreter/bytecode.cpp index ff24ee5e08864..cbdf8987cb07a 100644 --- a/src/hotspot/share/interpreter/bytecode.cpp +++ b/src/hotspot/share/interpreter/bytecode.cpp @@ -136,7 +136,7 @@ Symbol* Bytecode_member_ref::klass() const { Symbol* Bytecode_member_ref::name() const { - return constants()->name_ref_at(index(), _code); + return constants()->name_ref_at(index(), Bytecodes::java_code(_code)); } @@ -164,6 +164,8 @@ int Bytecode_member_ref::index() const { Bytecodes::Code rawc = code(); if (has_index_u4(rawc)) return get_index_u4(rawc); + else if (Bytecodes::is_field_code(rawc)) + return get_index_u2(rawc); else return get_index_u2_cpcache(rawc); } diff --git a/src/hotspot/share/interpreter/bytecodeTracer.cpp b/src/hotspot/share/interpreter/bytecodeTracer.cpp index 8aa3abd46578d..432138e7d6993 100644 --- a/src/hotspot/share/interpreter/bytecodeTracer.cpp +++ b/src/hotspot/share/interpreter/bytecodeTracer.cpp @@ -35,6 +35,7 @@ #include "oops/constantPool.inline.hpp" #include "oops/methodData.hpp" #include "oops/method.hpp" +#include "oops/resolvedFieldEntry.hpp" #include "oops/resolvedIndyEntry.hpp" #include "runtime/handles.inline.hpp" #include "runtime/mutexLocker.hpp" @@ -501,8 +502,8 @@ void BytecodePrinter::print_attributes(int bci, outputStream* st) { { int cp_index; if (is_linked()) { - int cpcache_index = get_native_index_u2(); - cp_index = cpcache()->entry_at(cpcache_index)->constant_pool_index(); + int field_index = get_native_index_u2(); + cp_index = cpcache()->resolved_field_entry_at(field_index)->constant_pool_index(); } else { cp_index = get_Java_index_u2(); } diff 
--git a/src/hotspot/share/interpreter/bytecodeUtils.cpp b/src/hotspot/share/interpreter/bytecodeUtils.cpp index 97b8441365f13..1954c839cee69 100644 --- a/src/hotspot/share/interpreter/bytecodeUtils.cpp +++ b/src/hotspot/share/interpreter/bytecodeUtils.cpp @@ -968,7 +968,7 @@ int ExceptionMessageBuilder::do_instruction(int bci) { case Bytecodes::_getstatic: case Bytecodes::_getfield: { // Find out the type of the field accessed. - int cp_index = Bytes::get_native_u2(code_base + pos) DEBUG_ONLY(+ ConstantPool::CPCACHE_INDEX_TAG); + int cp_index = Bytes::get_native_u2(code_base + pos); ConstantPool* cp = _method->constants(); int name_and_type_index = cp->name_and_type_ref_index_at(cp_index, code); int type_index = cp->signature_ref_index_at(name_and_type_index); @@ -982,7 +982,7 @@ int ExceptionMessageBuilder::do_instruction(int bci) { case Bytecodes::_putstatic: case Bytecodes::_putfield: { - int cp_index = Bytes::get_native_u2(code_base + pos) DEBUG_ONLY(+ ConstantPool::CPCACHE_INDEX_TAG); + int cp_index = Bytes::get_native_u2(code_base + pos); ConstantPool* cp = _method->constants(); int name_and_type_index = cp->name_and_type_ref_index_at(cp_index, code); int type_index = cp->signature_ref_index_at(name_and_type_index); @@ -1132,7 +1132,7 @@ int ExceptionMessageBuilder::get_NPE_null_slot(int bci) { case Bytecodes::_dastore: return 3; case Bytecodes::_putfield: { - int cp_index = Bytes::get_native_u2(code_base + pos) DEBUG_ONLY(+ ConstantPool::CPCACHE_INDEX_TAG); + int cp_index = Bytes::get_native_u2(code_base + pos); ConstantPool* cp = _method->constants(); int name_and_type_index = cp->name_and_type_ref_index_at(cp_index, code); int type_index = cp->signature_ref_index_at(name_and_type_index); @@ -1326,7 +1326,7 @@ bool ExceptionMessageBuilder::print_NPE_cause0(outputStream* os, int bci, int sl } case Bytecodes::_getstatic: { - int cp_index = Bytes::get_native_u2(code_base + pos) + ConstantPool::CPCACHE_INDEX_TAG; + int cp_index = Bytes::get_native_u2(code_base 
+ pos); print_field_and_class(os, _method, cp_index, code); return true; } @@ -1337,7 +1337,7 @@ bool ExceptionMessageBuilder::print_NPE_cause0(outputStream* os, int bci, int sl if (print_NPE_cause0(os, source_bci, 0, max_detail - 1, inner_expr)) { os->print("."); } - int cp_index = Bytes::get_native_u2(code_base + pos) + ConstantPool::CPCACHE_INDEX_TAG; + int cp_index = Bytes::get_native_u2(code_base + pos); os->print("%s", get_field_name(_method, cp_index, code)); return true; } @@ -1414,7 +1414,7 @@ void ExceptionMessageBuilder::print_NPE_failed_action(outputStream *os, int bci) case Bytecodes::_monitorexit: os->print("Cannot exit synchronized block"); break; case Bytecodes::_getfield: { - int cp_index = Bytes::get_native_u2(code_base + pos) DEBUG_ONLY(+ ConstantPool::CPCACHE_INDEX_TAG); + int cp_index = Bytes::get_native_u2(code_base + pos); ConstantPool* cp = _method->constants(); int name_and_type_index = cp->name_and_type_ref_index_at(cp_index, code); int name_index = cp->name_ref_index_at(name_and_type_index); @@ -1422,7 +1422,7 @@ void ExceptionMessageBuilder::print_NPE_failed_action(outputStream *os, int bci) os->print("Cannot read field \"%s\"", name->as_C_string()); } break; case Bytecodes::_putfield: { - int cp_index = Bytes::get_native_u2(code_base + pos) DEBUG_ONLY(+ ConstantPool::CPCACHE_INDEX_TAG); + int cp_index = Bytes::get_native_u2(code_base + pos); os->print("Cannot assign field \"%s\"", get_field_name(_method, cp_index, code)); } break; case Bytecodes::_invokevirtual: diff --git a/src/hotspot/share/interpreter/bytecodes.hpp b/src/hotspot/share/interpreter/bytecodes.hpp index 5bd2e5663c8a3..629cca706aeb4 100644 --- a/src/hotspot/share/interpreter/bytecodes.hpp +++ b/src/hotspot/share/interpreter/bytecodes.hpp @@ -419,6 +419,7 @@ class Bytecodes: AllStatic { || code == _fconst_0 || code == _dconst_0); } static bool is_return (Code code) { return (_ireturn <= code && code <= _return); } static bool is_invoke (Code code) { return (_invokevirtual 
<= code && code <= _invokedynamic); } + static bool is_field_code (Code code) { return (_getstatic <= java_code(code) && java_code(code) <= _putfield); } static bool has_receiver (Code code) { assert(is_invoke(code), ""); return code == _invokevirtual || code == _invokespecial || code == _invokeinterface; } diff --git a/src/hotspot/share/interpreter/interpreterRuntime.cpp b/src/hotspot/share/interpreter/interpreterRuntime.cpp index ddb6ca32108cd..335903089761b 100644 --- a/src/hotspot/share/interpreter/interpreterRuntime.cpp +++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp @@ -43,7 +43,7 @@ #include "memory/oopFactory.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" -#include "oops/constantPool.hpp" +#include "oops/constantPool.inline.hpp" #include "oops/cpCache.inline.hpp" #include "oops/instanceKlass.inline.hpp" #include "oops/klass.inline.hpp" @@ -664,16 +664,17 @@ void InterpreterRuntime::resolve_get_put(JavaThread* current, Bytecodes::Code by bytecode == Bytecodes::_putstatic); bool is_static = (bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic); + int field_index = last_frame.get_index_u2(bytecode); { JvmtiHideSingleStepping jhss(current); JavaThread* THREAD = current; // For exception macros. 
- LinkResolver::resolve_field_access(info, pool, last_frame.get_index_u2_cpcache(bytecode), + LinkResolver::resolve_field_access(info, pool, field_index, m, bytecode, CHECK); } // end JvmtiHideSingleStepping // check if link resolution caused cpCache to be updated - ConstantPoolCacheEntry* cp_cache_entry = last_frame.cache_entry(); - if (cp_cache_entry->is_resolved(bytecode)) return; + if (pool->resolved_field_entry_at(field_index)->is_resolved(bytecode)) return; + // compute auxiliary field attributes TosState state = as_TosState(info.field_type()); @@ -713,16 +714,9 @@ void InterpreterRuntime::resolve_get_put(JavaThread* current, Bytecodes::Code by } } - cp_cache_entry->set_field( - get_code, - put_code, - info.field_holder(), - info.index(), - info.offset(), - state, - info.access_flags().is_final(), - info.access_flags().is_volatile() - ); + ResolvedFieldEntry* entry = pool->resolved_field_entry_at(field_index); + entry->set_flags(info.access_flags().is_final(), info.access_flags().is_volatile()); + entry->fill_in(info.field_holder(), info.offset(), (u2)info.index(), (u1)state, (u1)get_code, (u1)put_code); } @@ -1165,12 +1159,12 @@ JRT_LEAF(void, InterpreterRuntime::at_unwind(JavaThread* current)) JRT_END JRT_ENTRY(void, InterpreterRuntime::post_field_access(JavaThread* current, oopDesc* obj, - ConstantPoolCacheEntry *cp_entry)) + ResolvedFieldEntry *entry)) // check the access_flags for the field in the klass - InstanceKlass* ik = InstanceKlass::cast(cp_entry->f1_as_klass()); - int index = cp_entry->field_index(); + InstanceKlass* ik = entry->field_holder(); + int index = entry->field_index(); if (!ik->field_status(index).is_access_watched()) return; bool is_static = (obj == nullptr); @@ -1181,26 +1175,25 @@ JRT_ENTRY(void, InterpreterRuntime::post_field_access(JavaThread* current, oopDe // non-static field accessors have an object, but we need a handle h_obj = Handle(current, obj); } - InstanceKlass* cp_entry_f1 = InstanceKlass::cast(cp_entry->f1_as_klass()); 
- jfieldID fid = jfieldIDWorkaround::to_jfieldID(cp_entry_f1, cp_entry->f2_as_index(), is_static); + InstanceKlass* field_holder = entry->field_holder(); // HERE + jfieldID fid = jfieldIDWorkaround::to_jfieldID(field_holder, entry->field_offset(), is_static); LastFrameAccessor last_frame(current); - JvmtiExport::post_field_access(current, last_frame.method(), last_frame.bcp(), cp_entry_f1, h_obj, fid); + JvmtiExport::post_field_access(current, last_frame.method(), last_frame.bcp(), field_holder, h_obj, fid); JRT_END JRT_ENTRY(void, InterpreterRuntime::post_field_modification(JavaThread* current, oopDesc* obj, - ConstantPoolCacheEntry *cp_entry, jvalue *value)) + ResolvedFieldEntry *entry, jvalue *value)) - Klass* k = cp_entry->f1_as_klass(); + InstanceKlass* ik = entry->field_holder(); // check the access_flags for the field in the klass - InstanceKlass* ik = InstanceKlass::cast(k); - int index = cp_entry->field_index(); + int index = entry->field_index(); // bail out if field modifications are not watched if (!ik->field_status(index).is_modification_watched()) return; char sig_type = '\0'; - switch(cp_entry->flag_state()) { + switch((TosState)entry->tos_state()) { case btos: sig_type = JVM_SIGNATURE_BYTE; break; case ztos: sig_type = JVM_SIGNATURE_BOOLEAN; break; case ctos: sig_type = JVM_SIGNATURE_CHAR; break; @@ -1215,7 +1208,7 @@ JRT_ENTRY(void, InterpreterRuntime::post_field_modification(JavaThread* current, bool is_static = (obj == nullptr); HandleMark hm(current); - jfieldID fid = jfieldIDWorkaround::to_jfieldID(ik, cp_entry->f2_as_index(), is_static); + jfieldID fid = jfieldIDWorkaround::to_jfieldID(ik, entry->field_offset(), is_static); jvalue fvalue; #ifdef _LP64 fvalue = *value; diff --git a/src/hotspot/share/interpreter/interpreterRuntime.hpp b/src/hotspot/share/interpreter/interpreterRuntime.hpp index 97cfcb1eae621..297585d37e849 100644 --- a/src/hotspot/share/interpreter/interpreterRuntime.hpp +++ b/src/hotspot/share/interpreter/interpreterRuntime.hpp 
@@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -120,9 +120,9 @@ class InterpreterRuntime: AllStatic { // Debugger support static void post_field_access(JavaThread* current, oopDesc* obj, - ConstantPoolCacheEntry *cp_entry); + ResolvedFieldEntry* entry); static void post_field_modification(JavaThread* current, oopDesc* obj, - ConstantPoolCacheEntry *cp_entry, jvalue *value); + ResolvedFieldEntry* entry, jvalue *value); static void post_method_entry(JavaThread* current); static void post_method_exit (JavaThread* current); static int interpreter_contains(address pc); diff --git a/src/hotspot/share/interpreter/rewriter.cpp b/src/hotspot/share/interpreter/rewriter.cpp index 6db1a64c94b68..6dc301ffa76fd 100644 --- a/src/hotspot/share/interpreter/rewriter.cpp +++ b/src/hotspot/share/interpreter/rewriter.cpp @@ -32,6 +32,7 @@ #include "memory/metadataFactory.hpp" #include "memory/resourceArea.hpp" #include "oops/generateOopMap.hpp" +#include "oops/resolvedFieldEntry.hpp" #include "oops/resolvedIndyEntry.hpp" #include "prims/methodHandles.hpp" #include "runtime/arguments.hpp" @@ -49,9 +50,13 @@ void Rewriter::compute_index_maps() { for (int i = 0; i < length; i++) { int tag = _pool->tag_at(i).value(); switch (tag) { - case JVM_CONSTANT_InterfaceMethodref: - case JVM_CONSTANT_Fieldref : // fall through - case JVM_CONSTANT_Methodref : // fall through + case JVM_CONSTANT_Fieldref : + _cp_map.at_put(i, _field_entry_index); + _field_entry_index++; + _initialized_field_entries.push(ResolvedFieldEntry((u2)i)); + break; + case JVM_CONSTANT_InterfaceMethodref: // fall through + case JVM_CONSTANT_Methodref : add_cp_cache_entry(i); break; case JVM_CONSTANT_Dynamic: @@ -100,7 +105,7 @@ void 
Rewriter::make_constant_pool_cache(TRAPS) { ClassLoaderData* loader_data = _pool->pool_holder()->class_loader_data(); ConstantPoolCache* cache = ConstantPoolCache::allocate(loader_data, _cp_cache_map, - _invokedynamic_references_map, _initialized_indy_entries, CHECK); + _invokedynamic_references_map, _initialized_indy_entries, _initialized_field_entries, CHECK); // initialize object cache in constant pool _pool->set_cache(cache); @@ -175,6 +180,19 @@ void Rewriter::rewrite_Object_init(const methodHandle& method, TRAPS) { } +void Rewriter::rewrite_field_reference(address bcp, int offset, bool reverse) { + address p = bcp + offset; + if (!reverse) { + int cp_index = Bytes::get_Java_u2(p); + int field_entry_index = _cp_map.at(cp_index); + Bytes::put_native_u2(p, field_entry_index); + } else { + int field_entry_index = Bytes::get_native_u2(p); + int pool_index = _initialized_field_entries.at(field_entry_index).constant_pool_index(); + Bytes::put_Java_u2(p, pool_index); + } +} + // Rewrite a classfile-order CP index into a native-order CPC index. void Rewriter::rewrite_member_reference(address bcp, int offset, bool reverse) { address p = bcp + offset; @@ -449,6 +467,8 @@ void Rewriter::scan_method(Thread* thread, Method* method, bool reverse, bool* i // fall through case Bytecodes::_getstatic : // fall through case Bytecodes::_getfield : // fall through + rewrite_field_reference(bcp, prefix_length+1, reverse); + break; case Bytecodes::_invokevirtual : // fall through case Bytecodes::_invokestatic : case Bytecodes::_invokeinterface: @@ -564,7 +584,8 @@ Rewriter::Rewriter(InstanceKlass* klass, const constantPoolHandle& cpool, Array< _resolved_references_map(cpool->length() / 2), _invokedynamic_references_map(cpool->length() / 2), _method_handle_invokers(cpool->length()), - _invokedynamic_index(0) + _invokedynamic_index(0), + _field_entry_index(0) { // Rewrite bytecodes - exception here exits. 
diff --git a/src/hotspot/share/interpreter/rewriter.hpp b/src/hotspot/share/interpreter/rewriter.hpp index d5b4f7c3dc4ff..ec3bbfd55d08f 100644 --- a/src/hotspot/share/interpreter/rewriter.hpp +++ b/src/hotspot/share/interpreter/rewriter.hpp @@ -27,6 +27,7 @@ #include "memory/allocation.hpp" #include "oops/constantPool.hpp" +#include "oops/resolvedFieldEntry.hpp" #include "oops/resolvedIndyEntry.hpp" #include "utilities/growableArray.hpp" @@ -47,10 +48,12 @@ class Rewriter: public StackObj { GrowableArray _method_handle_invokers; int _resolved_reference_limit; int _invokedynamic_index; + int _field_entry_index; // For collecting information about invokedynamic bytecodes before resolution // With this, we can know how many indy calls there are and resolve them later GrowableArray _initialized_indy_entries; + GrowableArray _initialized_field_entries; void init_maps(int length) { _cp_map.trunc_to(0); @@ -163,6 +166,7 @@ class Rewriter: public StackObj { void make_constant_pool_cache(TRAPS); void scan_method(Thread* thread, Method* m, bool reverse, bool* invokespecial_error); void rewrite_Object_init(const methodHandle& m, TRAPS); + void rewrite_field_reference(address bcp, int offset, bool reverse); void rewrite_member_reference(address bcp, int offset, bool reverse); void maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse); void rewrite_invokedynamic(address bcp, int offset, bool reverse); diff --git a/src/hotspot/share/interpreter/templateTable.hpp b/src/hotspot/share/interpreter/templateTable.hpp index e8409b1dc0fd5..27f7931d2c7e7 100644 --- a/src/hotspot/share/interpreter/templateTable.hpp +++ b/src/hotspot/share/interpreter/templateTable.hpp @@ -266,7 +266,16 @@ class TemplateTable: AllStatic { Register cache, // output for CP cache Register index, // output for CP index size_t index_size); // one of 1,2,4 + static void resolve_cache_and_index_for_field(int byte_no, + Register cache, + Register index); static void 
load_invokedynamic_entry(Register method); + static void load_resolved_field_entry(Register obj, + Register cache, + Register tos_state, + Register off, + Register flags, + bool is_static); static void load_invoke_cp_cache_entry(int byte_no, Register method, Register itable_index, diff --git a/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp b/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp index 5e95fc1d457fc..356bb7c940ba5 100644 --- a/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp +++ b/src/hotspot/share/interpreter/zero/bytecodeInterpreter.cpp @@ -45,6 +45,7 @@ #include "oops/objArrayKlass.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" +#include "oops/resolvedFieldEntry.hpp" #include "oops/resolvedIndyEntry.hpp" #include "oops/typeArrayOop.inline.hpp" #include "prims/jvmtiExport.hpp" @@ -406,7 +407,7 @@ JRT_END target = obj; \ } \ CALL_VM(InterpreterRuntime::post_field_access(THREAD, \ - target, cache), \ + target, entry), \ handle_exception); \ } \ } \ @@ -426,7 +427,7 @@ JRT_END target = obj; \ } \ CALL_VM(InterpreterRuntime::post_field_modification(THREAD, \ - target, cache, \ + target, entry, \ (jvalue*)STACK_SLOT(-1)), \ handle_exception); \ } \ @@ -1722,8 +1723,8 @@ void BytecodeInterpreter::run(interpreterState istate) { CASE(_getstatic): { u2 index; - ConstantPoolCacheEntry* cache; index = Bytes::get_native_u2(pc+1); + ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index); // QQQ Need to make this as inlined as possible. 
Probably need to // split all the bytecode cases out so c++ compiler has a chance @@ -1736,26 +1737,25 @@ void BytecodeInterpreter::run(interpreterState istate) { code = Bytecodes::_getfield; } - cache = cp->entry_at(index); - if (!cache->is_resolved(code)) { + if (!entry->is_resolved(code)) { CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, code), handle_exception); - cache = cp->entry_at(index); + entry = cp->resolved_field_entry_at(index); } oop obj; if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { - Klass* k = cache->f1_as_klass(); + Klass* k = entry->field_holder(); obj = k->java_mirror(); MORE_STACK(1); // Assume single slot push } else { obj = STACK_OBJECT(-1); CHECK_NULL(obj); // Check if we can rewrite non-volatile _getfield to one of the _fast_Xgetfield. - if (REWRITE_BYTECODES && !cache->is_volatile() && + if (REWRITE_BYTECODES && !entry->is_volatile() && ((Bytecodes::Code)opcode != Bytecodes::_nofast_getfield)) { // Rewrite current BC to _fast_Xgetfield. - REWRITE_AT_PC(fast_get_type(cache->flag_state())); + REWRITE_AT_PC(fast_get_type((TosState)(entry->tos_state()))); } } @@ -1764,9 +1764,9 @@ void BytecodeInterpreter::run(interpreterState istate) { // // Now store the result on the stack // - TosState tos_type = cache->flag_state(); - int field_offset = cache->f2_as_index(); - if (cache->is_volatile()) { + TosState tos_type = (TosState)(entry->tos_state()); + int field_offset = entry->field_offset(); + if (entry->is_volatile()) { if (support_IRIW_for_not_multiple_copy_atomic_cpu) { OrderAccess::fence(); } @@ -1842,14 +1842,14 @@ void BytecodeInterpreter::run(interpreterState istate) { } UPDATE_PC_AND_CONTINUE(3); - } + } CASE(_putfield): CASE(_nofast_putfield): CASE(_putstatic): { u2 index = Bytes::get_native_u2(pc+1); - ConstantPoolCacheEntry* cache = cp->entry_at(index); + ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index); // Interpreter runtime does not expect "nofast" opcodes, // prepare the vanilla opcode for it. 
@@ -1858,10 +1858,10 @@ void BytecodeInterpreter::run(interpreterState istate) { code = Bytecodes::_putfield; } - if (!cache->is_resolved(code)) { + if (!entry->is_resolved(code)) { CALL_VM(InterpreterRuntime::resolve_from_cache(THREAD, code), handle_exception); - cache = cp->entry_at(index); + entry = cp->resolved_field_entry_at(index); } // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases @@ -1869,14 +1869,14 @@ void BytecodeInterpreter::run(interpreterState istate) { oop obj; int count; - TosState tos_type = cache->flag_state(); + TosState tos_type = (TosState)(entry->tos_state()); count = -1; if (tos_type == ltos || tos_type == dtos) { --count; } if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { - Klass* k = cache->f1_as_klass(); + Klass* k = entry->field_holder(); obj = k->java_mirror(); } else { --count; @@ -1884,10 +1884,10 @@ void BytecodeInterpreter::run(interpreterState istate) { CHECK_NULL(obj); // Check if we can rewrite non-volatile _putfield to one of the _fast_Xputfield. - if (REWRITE_BYTECODES && !cache->is_volatile() && + if (REWRITE_BYTECODES && !entry->is_volatile() && ((Bytecodes::Code)opcode != Bytecodes::_nofast_putfield)) { // Rewrite current BC to _fast_Xputfield. 
- REWRITE_AT_PC(fast_put_type(cache->flag_state())); + REWRITE_AT_PC(fast_put_type((TosState)(entry->tos_state()))); } } @@ -1896,8 +1896,8 @@ void BytecodeInterpreter::run(interpreterState istate) { // // Now store the result // - int field_offset = cache->f2_as_index(); - if (cache->is_volatile()) { + int field_offset = entry->field_offset(); + if (entry->is_volatile()) { switch (tos_type) { case ztos: obj->release_byte_field_put(field_offset, (STACK_INT(-1) & 1)); // only store LSB @@ -2585,8 +2585,8 @@ void BytecodeInterpreter::run(interpreterState istate) { CASE(_fast_agetfield): { u2 index = Bytes::get_native_u2(pc+1); - ConstantPoolCacheEntry* cache = cp->entry_at(index); - int field_offset = cache->f2_as_index(); + ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index); + int field_offset = entry->field_offset(); oop obj = STACK_OBJECT(-1); CHECK_NULL(obj); @@ -2600,8 +2600,8 @@ void BytecodeInterpreter::run(interpreterState istate) { CASE(_fast_bgetfield): { u2 index = Bytes::get_native_u2(pc+1); - ConstantPoolCacheEntry* cache = cp->entry_at(index); - int field_offset = cache->f2_as_index(); + ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index); + int field_offset = entry->field_offset(); oop obj = STACK_OBJECT(-1); CHECK_NULL(obj); @@ -2614,8 +2614,8 @@ void BytecodeInterpreter::run(interpreterState istate) { CASE(_fast_cgetfield): { u2 index = Bytes::get_native_u2(pc+1); - ConstantPoolCacheEntry* cache = cp->entry_at(index); - int field_offset = cache->f2_as_index(); + ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index); + int field_offset = entry->field_offset(); oop obj = STACK_OBJECT(-1); CHECK_NULL(obj); @@ -2628,8 +2628,8 @@ void BytecodeInterpreter::run(interpreterState istate) { CASE(_fast_dgetfield): { u2 index = Bytes::get_native_u2(pc+1); - ConstantPoolCacheEntry* cache = cp->entry_at(index); - int field_offset = cache->f2_as_index(); + ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index); + int 
field_offset = entry->field_offset(); oop obj = STACK_OBJECT(-1); CHECK_NULL(obj); @@ -2643,8 +2643,8 @@ void BytecodeInterpreter::run(interpreterState istate) { CASE(_fast_fgetfield): { u2 index = Bytes::get_native_u2(pc+1); - ConstantPoolCacheEntry* cache = cp->entry_at(index); - int field_offset = cache->f2_as_index(); + ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index); + int field_offset = entry->field_offset(); oop obj = STACK_OBJECT(-1); CHECK_NULL(obj); @@ -2657,8 +2657,8 @@ void BytecodeInterpreter::run(interpreterState istate) { CASE(_fast_igetfield): { u2 index = Bytes::get_native_u2(pc+1); - ConstantPoolCacheEntry* cache = cp->entry_at(index); - int field_offset = cache->f2_as_index(); + ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index); + int field_offset = entry->field_offset(); oop obj = STACK_OBJECT(-1); CHECK_NULL(obj); @@ -2671,8 +2671,8 @@ void BytecodeInterpreter::run(interpreterState istate) { CASE(_fast_lgetfield): { u2 index = Bytes::get_native_u2(pc+1); - ConstantPoolCacheEntry* cache = cp->entry_at(index); - int field_offset = cache->f2_as_index(); + ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index); + int field_offset = entry->field_offset(); oop obj = STACK_OBJECT(-1); CHECK_NULL(obj); @@ -2686,8 +2686,8 @@ void BytecodeInterpreter::run(interpreterState istate) { CASE(_fast_sgetfield): { u2 index = Bytes::get_native_u2(pc+1); - ConstantPoolCacheEntry* cache = cp->entry_at(index); - int field_offset = cache->f2_as_index(); + ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index); + int field_offset = entry->field_offset(); oop obj = STACK_OBJECT(-1); CHECK_NULL(obj); @@ -2700,14 +2700,14 @@ void BytecodeInterpreter::run(interpreterState istate) { CASE(_fast_aputfield): { u2 index = Bytes::get_native_u2(pc+1); - ConstantPoolCacheEntry* cache = cp->entry_at(index); + ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index); oop obj = STACK_OBJECT(-2); CHECK_NULL(obj); 
MAYBE_POST_FIELD_MODIFICATION(obj); - int field_offset = cache->f2_as_index(); + int field_offset = entry->field_offset(); obj->obj_field_put(field_offset, STACK_OBJECT(-1)); UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2); @@ -2715,14 +2715,14 @@ void BytecodeInterpreter::run(interpreterState istate) { CASE(_fast_bputfield): { u2 index = Bytes::get_native_u2(pc+1); - ConstantPoolCacheEntry* cache = cp->entry_at(index); + ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index); oop obj = STACK_OBJECT(-2); CHECK_NULL(obj); MAYBE_POST_FIELD_MODIFICATION(obj); - int field_offset = cache->f2_as_index(); + int field_offset = entry->field_offset(); obj->byte_field_put(field_offset, STACK_INT(-1)); UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2); @@ -2730,14 +2730,14 @@ void BytecodeInterpreter::run(interpreterState istate) { CASE(_fast_zputfield): { u2 index = Bytes::get_native_u2(pc+1); - ConstantPoolCacheEntry* cache = cp->entry_at(index); + ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index); oop obj = STACK_OBJECT(-2); CHECK_NULL(obj); MAYBE_POST_FIELD_MODIFICATION(obj); - int field_offset = cache->f2_as_index(); + int field_offset = entry->field_offset(); obj->byte_field_put(field_offset, (STACK_INT(-1) & 1)); // only store LSB UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2); @@ -2745,14 +2745,14 @@ void BytecodeInterpreter::run(interpreterState istate) { CASE(_fast_cputfield): { u2 index = Bytes::get_native_u2(pc+1); - ConstantPoolCacheEntry* cache = cp->entry_at(index); + ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index); oop obj = STACK_OBJECT(-2); CHECK_NULL(obj); MAYBE_POST_FIELD_MODIFICATION(obj); - int field_offset = cache->f2_as_index(); + int field_offset = entry->field_offset(); obj->char_field_put(field_offset, STACK_INT(-1)); UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2); @@ -2760,14 +2760,14 @@ void BytecodeInterpreter::run(interpreterState istate) { CASE(_fast_dputfield): { u2 index = Bytes::get_native_u2(pc+1); - ConstantPoolCacheEntry* cache = 
cp->entry_at(index); + ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index); oop obj = STACK_OBJECT(-3); CHECK_NULL(obj); MAYBE_POST_FIELD_MODIFICATION(obj); - int field_offset = cache->f2_as_index(); + int field_offset = entry->field_offset(); obj->double_field_put(field_offset, STACK_DOUBLE(-1)); UPDATE_PC_AND_TOS_AND_CONTINUE(3, -3); @@ -2775,14 +2775,14 @@ void BytecodeInterpreter::run(interpreterState istate) { CASE(_fast_fputfield): { u2 index = Bytes::get_native_u2(pc+1); - ConstantPoolCacheEntry* cache = cp->entry_at(index); + ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index); oop obj = STACK_OBJECT(-2); CHECK_NULL(obj); MAYBE_POST_FIELD_MODIFICATION(obj); - int field_offset = cache->f2_as_index(); + int field_offset = entry->field_offset(); obj->float_field_put(field_offset, STACK_FLOAT(-1)); UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2); @@ -2790,14 +2790,14 @@ void BytecodeInterpreter::run(interpreterState istate) { CASE(_fast_iputfield): { u2 index = Bytes::get_native_u2(pc+1); - ConstantPoolCacheEntry* cache = cp->entry_at(index); + ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index); oop obj = STACK_OBJECT(-2); CHECK_NULL(obj); MAYBE_POST_FIELD_MODIFICATION(obj); - int field_offset = cache->f2_as_index(); + int field_offset = entry->field_offset(); obj->int_field_put(field_offset, STACK_INT(-1)); UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2); @@ -2805,14 +2805,14 @@ void BytecodeInterpreter::run(interpreterState istate) { CASE(_fast_lputfield): { u2 index = Bytes::get_native_u2(pc+1); - ConstantPoolCacheEntry* cache = cp->entry_at(index); + ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index); oop obj = STACK_OBJECT(-3); CHECK_NULL(obj); MAYBE_POST_FIELD_MODIFICATION(obj); - int field_offset = cache->f2_as_index(); + int field_offset = entry->field_offset(); obj->long_field_put(field_offset, STACK_LONG(-1)); UPDATE_PC_AND_TOS_AND_CONTINUE(3, -3); @@ -2820,14 +2820,14 @@ void BytecodeInterpreter::run(interpreterState istate) 
{ CASE(_fast_sputfield): { u2 index = Bytes::get_native_u2(pc+1); - ConstantPoolCacheEntry* cache = cp->entry_at(index); + ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index); oop obj = STACK_OBJECT(-2); CHECK_NULL(obj); MAYBE_POST_FIELD_MODIFICATION(obj); - int field_offset = cache->f2_as_index(); + int field_offset = entry->field_offset(); obj->short_field_put(field_offset, STACK_INT(-1)); UPDATE_PC_AND_TOS_AND_CONTINUE(3, -2); @@ -2842,8 +2842,8 @@ void BytecodeInterpreter::run(interpreterState istate) { CASE(_fast_aaccess_0): { u2 index = Bytes::get_native_u2(pc+2); - ConstantPoolCacheEntry* cache = cp->entry_at(index); - int field_offset = cache->f2_as_index(); + ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index); + int field_offset = entry->field_offset(); oop obj = LOCALS_OBJECT(0); CHECK_NULL(obj); @@ -2858,8 +2858,8 @@ void BytecodeInterpreter::run(interpreterState istate) { CASE(_fast_iaccess_0): { u2 index = Bytes::get_native_u2(pc+2); - ConstantPoolCacheEntry* cache = cp->entry_at(index); - int field_offset = cache->f2_as_index(); + ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index); + int field_offset = entry->field_offset(); oop obj = LOCALS_OBJECT(0); CHECK_NULL(obj); @@ -2873,8 +2873,8 @@ void BytecodeInterpreter::run(interpreterState istate) { CASE(_fast_faccess_0): { u2 index = Bytes::get_native_u2(pc+2); - ConstantPoolCacheEntry* cache = cp->entry_at(index); - int field_offset = cache->f2_as_index(); + ResolvedFieldEntry* entry = cp->resolved_field_entry_at(index); + int field_offset = entry->field_offset(); oop obj = LOCALS_OBJECT(0); CHECK_NULL(obj); diff --git a/src/hotspot/share/oops/constantPool.cpp b/src/hotspot/share/oops/constantPool.cpp index f697164c630da..9840ba5cf66ad 100644 --- a/src/hotspot/share/oops/constantPool.cpp +++ b/src/hotspot/share/oops/constantPool.cpp @@ -685,8 +685,7 @@ int ConstantPool::to_cp_index(int index, Bytecodes::Code code) { case Bytecodes::_getstatic: case 
Bytecodes::_putfield: case Bytecodes::_putstatic: - // TODO: handle resolved field entries with new structure - // i = .... + return resolved_field_entry_at(index)->constant_pool_index(); case Bytecodes::_invokeinterface: case Bytecodes::_invokehandle: case Bytecodes::_invokespecial: @@ -737,7 +736,6 @@ u2 ConstantPool::klass_ref_index_at(int index, Bytecodes::Code code) { return uncached_klass_ref_index_at(to_cp_index(index, code)); } - int ConstantPool::remap_instruction_operand_from_cache(int operand) { int cpc_index = operand; DEBUG_ONLY(cpc_index -= CPCACHE_INDEX_TAG); diff --git a/src/hotspot/share/oops/constantPool.hpp b/src/hotspot/share/oops/constantPool.hpp index 9563b364a259c..5a92ece45124d 100644 --- a/src/hotspot/share/oops/constantPool.hpp +++ b/src/hotspot/share/oops/constantPool.hpp @@ -919,6 +919,10 @@ class ConstantPool : public Metadata { const char* internal_name() const { return "{constant pool}"; } + // ResolvedFieldEntry getters + inline ResolvedFieldEntry* resolved_field_entry_at(int field_index); + inline int resolved_field_entries_length() const; + // ResolvedIndyEntry getters inline ResolvedIndyEntry* resolved_indy_entry_at(int index); inline int resolved_indy_entries_length() const; diff --git a/src/hotspot/share/oops/constantPool.inline.hpp b/src/hotspot/share/oops/constantPool.inline.hpp index f553c385cdedc..e6e67f0a58b63 100644 --- a/src/hotspot/share/oops/constantPool.inline.hpp +++ b/src/hotspot/share/oops/constantPool.inline.hpp @@ -28,6 +28,7 @@ #include "oops/constantPool.hpp" #include "oops/cpCache.inline.hpp" +#include "oops/resolvedFieldEntry.hpp" #include "oops/resolvedIndyEntry.hpp" #include "runtime/atomic.hpp" @@ -42,6 +43,14 @@ inline Klass* ConstantPool::resolved_klass_at(int which) const { // Used by Com return Atomic::load_acquire(adr); } +inline ResolvedFieldEntry* ConstantPool::resolved_field_entry_at(int field_index) { + return cache()->resolved_field_entry_at(field_index); +} + +inline int 
ConstantPool::resolved_field_entries_length() const { + return cache()->resolved_field_entries_length(); +} + inline u2 ConstantPool::invokedynamic_bootstrap_ref_index_at(int indy_index) const { return cache()->resolved_indy_entry_at(decode_invokedynamic_index(indy_index))->constant_pool_index(); } diff --git a/src/hotspot/share/oops/cpCache.cpp b/src/hotspot/share/oops/cpCache.cpp index fa785389e8cc7..9a9d147632d63 100644 --- a/src/hotspot/share/oops/cpCache.cpp +++ b/src/hotspot/share/oops/cpCache.cpp @@ -46,6 +46,7 @@ #include "oops/cpCache.inline.hpp" #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" +#include "oops/resolvedFieldEntry.hpp" #include "oops/resolvedIndyEntry.hpp" #include "prims/methodHandles.hpp" #include "runtime/arguments.hpp" @@ -640,28 +641,35 @@ void ConstantPoolCacheEntry::verify(outputStream* st) const { // Implementation of ConstantPoolCache +template +static Array* initialize_resolved_entries_array(ClassLoaderData* loader_data, GrowableArray entries, TRAPS) { + Array* resolved_entries; + if (entries.length() != 0) { + resolved_entries = MetadataFactory::new_array(loader_data, entries.length(), CHECK_NULL); + for (int i = 0; i < entries.length(); i++) { + resolved_entries->at_put(i, entries.at(i)); + } + return resolved_entries; + } + return nullptr; +} + ConstantPoolCache* ConstantPoolCache::allocate(ClassLoaderData* loader_data, const intStack& index_map, const intStack& invokedynamic_map, const GrowableArray indy_entries, + const GrowableArray field_entries, TRAPS) { const int length = index_map.length(); int size = ConstantPoolCache::size(length); - // Initialize ResolvedIndyEntry array with available data - Array* resolved_indy_entries; - if (indy_entries.length()) { - resolved_indy_entries = MetadataFactory::new_array(loader_data, indy_entries.length(), CHECK_NULL); - for (int i = 0; i < indy_entries.length(); i++) { - resolved_indy_entries->at_put(i, indy_entries.at(i)); - } - } else { - resolved_indy_entries = 
nullptr; - } + // Initialize resolved entry arrays with available data + Array* resolved_field_entries = initialize_resolved_entries_array(loader_data, field_entries, CHECK_NULL); + Array* resolved_indy_entries = initialize_resolved_entries_array(loader_data, indy_entries, CHECK_NULL); return new (loader_data, size, MetaspaceObj::ConstantPoolCacheType, THREAD) - ConstantPoolCache(length, index_map, invokedynamic_map, resolved_indy_entries); + ConstantPoolCache(length, index_map, invokedynamic_map, resolved_indy_entries, resolved_field_entries); } void ConstantPoolCache::initialize(const intArray& inverse_index_map, @@ -714,6 +722,11 @@ void ConstantPoolCache::remove_unshareable_info() { resolved_indy_entry_at(i)->remove_unshareable_info(); } } + if (_resolved_field_entries != nullptr) { + for (int i = 0; i < _resolved_field_entries->length(); i++) { + resolved_field_entry_at(i)->remove_unshareable_info(); + } + } } #endif // INCLUDE_CDS @@ -727,8 +740,14 @@ void ConstantPoolCache::deallocate_contents(ClassLoaderData* data) { if (_initial_entries != nullptr) { Arguments::assert_is_dumping_archive(); MetadataFactory::free_array(data, _initial_entries); - if (_resolved_indy_entries) + if (_resolved_indy_entries) { MetadataFactory::free_array(data, _resolved_indy_entries); + _resolved_indy_entries = nullptr; + } + if (_resolved_field_entries) { + MetadataFactory::free_array(data, _resolved_field_entries); + _resolved_field_entries = nullptr; + } _initial_entries = nullptr; } #endif @@ -830,6 +849,9 @@ void ConstantPoolCache::metaspace_pointers_do(MetaspaceClosure* it) { if (_resolved_indy_entries != nullptr) { it->push(&_resolved_indy_entries, MetaspaceClosure::_writable); } + if (_resolved_field_entries != nullptr) { + it->push(&_resolved_field_entries, MetaspaceClosure::_writable); + } } bool ConstantPoolCache::save_and_throw_indy_exc( @@ -924,14 +946,8 @@ void ConstantPoolCache::print_on(outputStream* st) const { st->print_cr("%s", internal_name()); // print 
constant pool cache entries for (int i = 0; i < length(); i++) entry_at(i)->print(st, i, this); - for (int i = 0; i < resolved_indy_entries_length(); i++) { - ResolvedIndyEntry* indy_entry = resolved_indy_entry_at(i); - indy_entry->print_on(st); - if (indy_entry->has_appendix()) { - st->print(" appendix: "); - constant_pool()->resolved_reference_from_indy(i)->print_on(st); - } - } + print_resolved_field_entries(st); + print_resolved_indy_entries(st); } void ConstantPoolCache::print_value_on(outputStream* st) const { @@ -942,9 +958,20 @@ void ConstantPoolCache::print_value_on(outputStream* st) const { } -void ConstantPoolCache::print_resolved_indy_entries(outputStream* st) const { - for (int i = 0; i < _resolved_indy_entries->length(); i++) { - _resolved_indy_entries->at(i).print_on(st); +void ConstantPoolCache::print_resolved_field_entries(outputStream* st) const { + for (int field_index = 0; field_index < resolved_field_entries_length(); field_index++) { + resolved_field_entry_at(field_index)->print_on(st); + } +} + +void ConstantPoolCache::print_resolved_indy_entries(outputStream* st) const { + for (int indy_index = 0; indy_index < resolved_indy_entries_length(); indy_index++) { + ResolvedIndyEntry* indy_entry = resolved_indy_entry_at(indy_index); + indy_entry->print_on(st); + if (indy_entry->has_appendix()) { + st->print(" appendix: "); + constant_pool()->resolved_reference_from_indy(indy_index)->print_on(st); + } } } diff --git a/src/hotspot/share/oops/cpCache.hpp b/src/hotspot/share/oops/cpCache.hpp index 5c9810fa2c1b5..5d965ee26b5c4 100644 --- a/src/hotspot/share/oops/cpCache.hpp +++ b/src/hotspot/share/oops/cpCache.hpp @@ -128,6 +128,7 @@ // source code. The _indices field with the bytecode must be written last. 
class CallInfo; +class ResolvedFieldEntry; class ResolvedIndyEntry; class ConstantPoolCacheEntry { @@ -406,7 +407,8 @@ class ConstantPoolCache: public MetaspaceObj { // RedefineClasses support uint64_t _gc_epoch; - Array* _resolved_indy_entries; + Array* _resolved_indy_entries; + Array* _resolved_field_entries; CDS_ONLY(Array* _initial_entries;) @@ -417,7 +419,8 @@ class ConstantPoolCache: public MetaspaceObj { ConstantPoolCache(int length, const intStack& inverse_index_map, const intStack& invokedynamic_references_map, - Array* indy_info); + Array* indy_info, + Array* field_entries); // Initialization void initialize(const intArray& inverse_index_map, @@ -427,6 +430,7 @@ class ConstantPoolCache: public MetaspaceObj { const intStack& cp_cache_map, const intStack& invokedynamic_references_map, const GrowableArray indy_entries, + const GrowableArray field_entries, TRAPS); int length() const { return _length; } @@ -442,14 +446,20 @@ class ConstantPoolCache: public MetaspaceObj { Array* reference_map() const { return _reference_map; } void set_reference_map(Array* o) { _reference_map = o; } + Array* resolved_field_entries() { return _resolved_field_entries; } + inline ResolvedFieldEntry* resolved_field_entry_at(int field_index) const; + inline int resolved_field_entries_length() const; + void print_resolved_field_entries(outputStream* st) const; + Array* resolved_indy_entries() { return _resolved_indy_entries; } inline ResolvedIndyEntry* resolved_indy_entry_at(int index) const; inline int resolved_indy_entries_length() const; void print_resolved_indy_entries(outputStream* st) const; // Assembly code support - static ByteSize resolved_references_offset() { return byte_offset_of(ConstantPoolCache, _resolved_references); } - static ByteSize invokedynamic_entries_offset() { return byte_offset_of(ConstantPoolCache, _resolved_indy_entries); } + static ByteSize resolved_references_offset() { return byte_offset_of(ConstantPoolCache, _resolved_references); } + static ByteSize 
invokedynamic_entries_offset() { return byte_offset_of(ConstantPoolCache, _resolved_indy_entries); } + static ByteSize field_entries_offset() { return byte_offset_of(ConstantPoolCache, _resolved_field_entries); } #if INCLUDE_CDS void remove_unshareable_info(); diff --git a/src/hotspot/share/oops/cpCache.inline.hpp b/src/hotspot/share/oops/cpCache.inline.hpp index 0ac41976f6501..177af1d2589be 100644 --- a/src/hotspot/share/oops/cpCache.inline.hpp +++ b/src/hotspot/share/oops/cpCache.inline.hpp @@ -28,6 +28,7 @@ #include "oops/cpCache.hpp" #include "oops/oopHandle.inline.hpp" +#include "oops/resolvedFieldEntry.hpp" #include "oops/resolvedIndyEntry.hpp" #include "runtime/atomic.hpp" @@ -88,11 +89,13 @@ inline bool ConstantPoolCacheEntry::indy_resolution_failed() const { inline ConstantPoolCache::ConstantPoolCache(int length, const intStack& inverse_index_map, const intStack& invokedynamic_references_map, - Array* invokedynamic_info) : + Array* invokedynamic_info, + Array* field_entries) : _length(length), _constant_pool(nullptr), _gc_epoch(0), - _resolved_indy_entries(invokedynamic_info) { + _resolved_indy_entries(invokedynamic_info), + _resolved_field_entries(field_entries) { CDS_JAVA_HEAP_ONLY(_archived_references_index = -1;) initialize(inverse_index_map, invokedynamic_references_map); @@ -107,6 +110,14 @@ inline objArrayOop ConstantPoolCache::resolved_references() { return (objArrayOop)obj; } +inline ResolvedFieldEntry* ConstantPoolCache::resolved_field_entry_at(int field_index) const { + return _resolved_field_entries->adr_at(field_index); +} + +inline int ConstantPoolCache::resolved_field_entries_length() const { + return _resolved_field_entries->length(); +} + inline ResolvedIndyEntry* ConstantPoolCache::resolved_indy_entry_at(int index) const { return _resolved_indy_entries->adr_at(index); } diff --git a/src/hotspot/share/oops/generateOopMap.cpp b/src/hotspot/share/oops/generateOopMap.cpp index 2c7f04bd59437..b48bd23cc2e81 100644 --- 
a/src/hotspot/share/oops/generateOopMap.cpp +++ b/src/hotspot/share/oops/generateOopMap.cpp @@ -1597,10 +1597,18 @@ void GenerateOopMap::interp1(BytecodeStream *itr) { case Bytecodes::_jsr: do_jsr(itr->dest()); break; case Bytecodes::_jsr_w: do_jsr(itr->dest_w()); break; - case Bytecodes::_getstatic: do_field(true, true, itr->get_index_u2_cpcache(), itr->bci(), itr->code()); break; - case Bytecodes::_putstatic: do_field(false, true, itr->get_index_u2_cpcache(), itr->bci(), itr->code()); break; - case Bytecodes::_getfield: do_field(true, false, itr->get_index_u2_cpcache(), itr->bci(), itr->code()); break; - case Bytecodes::_putfield: do_field(false, false, itr->get_index_u2_cpcache(), itr->bci(), itr->code()); break; + case Bytecodes::_getstatic: + do_field(true, true, itr->get_index_u2(), itr->bci(), itr->code()); + break; + case Bytecodes::_putstatic: + do_field(false, true, itr->get_index_u2(), itr->bci(), itr->code()); + break; + case Bytecodes::_getfield: + do_field(true, false, itr->get_index_u2(), itr->bci(), itr->code()); + break; + case Bytecodes::_putfield: + do_field(false, false, itr->get_index_u2(), itr->bci(), itr->code()); + break; case Bytecodes::_invokevirtual: case Bytecodes::_invokespecial: do_method(false, false, itr->get_index_u2_cpcache(), itr->bci(), itr->code()); break; diff --git a/src/hotspot/share/oops/resolvedFieldEntry.cpp b/src/hotspot/share/oops/resolvedFieldEntry.cpp new file mode 100644 index 0000000000000..779f7676293b4 --- /dev/null +++ b/src/hotspot/share/oops/resolvedFieldEntry.cpp @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "resolvedFieldEntry.hpp" + +void ResolvedFieldEntry::print_on(outputStream* st) const { + st->print_cr("Field Entry:"); + + if (field_holder() != nullptr) { + st->print_cr(" - Holder: " INTPTR_FORMAT " %s", p2i(field_holder()), field_holder()->external_name()); + } else { + st->print_cr("- Holder: null"); + } + st->print_cr(" - Offset: %d", field_offset()); + st->print_cr(" - Field Index: %d", field_index()); + st->print_cr(" - CP Index: %d", constant_pool_index()); + st->print_cr(" - TOS: %s", type2name(as_BasicType((TosState)tos_state()))); + st->print_cr(" - Is Final: %d", is_final()); + st->print_cr(" - Is Volatile: %d", is_volatile()); + st->print_cr(" - Get Bytecode: %s", Bytecodes::name((Bytecodes::Code)get_code())); + st->print_cr(" - Put Bytecode: %s", Bytecodes::name((Bytecodes::Code)put_code())); +} + +void ResolvedFieldEntry::remove_unshareable_info() { + u2 saved_cpool_index = _cpool_index; + memset(this, 0, sizeof(*this)); + _cpool_index = saved_cpool_index; +} diff --git a/src/hotspot/share/oops/resolvedFieldEntry.hpp b/src/hotspot/share/oops/resolvedFieldEntry.hpp new file mode 100644 index 0000000000000..8b654b60530b5 --- /dev/null +++ b/src/hotspot/share/oops/resolvedFieldEntry.hpp @@ 
-0,0 +1,144 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_OOPS_RESOLVEDFIELDENTRY_HPP +#define SHARE_OOPS_RESOLVEDFIELDENTRY_HPP + +#include "interpreter/bytecodes.hpp" +#include "oops/instanceKlass.hpp" +#include "runtime/atomic.hpp" +#include "utilities/sizes.hpp" + +// ResolvedFieldEntry contains the resolution information for field related bytecodes +// like getfield, putfield, getstatic, and putstatic. A member of this class can be initialized +// with the constant pool index associated with the bytecode before any resolution is done, where +// "resolution" refers to populating the getcode and putcode fields and other relevant information. +// The field's type (TOS), offset, holder klass, and index within that class can all be acquired +// together and are used to populate this structure.
These entries are contained +// within the ConstantPoolCache and are accessed with indices added to the bytecode after +// rewriting. + +// Field bytecodes start with a constant pool index as their operand, which is then rewritten to +// a "field index", which is an index into the array of ResolvedFieldEntry. + +//class InstanceKlass; +class ResolvedFieldEntry { + friend class VMStructs; + + InstanceKlass* _field_holder; // Field holder klass + int _field_offset; // Field offset in bytes + u2 _field_index; // Index into field information in holder InstanceKlass + u2 _cpool_index; // Constant pool index + u1 _tos_state; // TOS state + u1 _flags; // Flags: [0000|00|is_final|is_volatile] + u1 _get_code, _put_code; // Get and Put bytecodes of the field + +public: + ResolvedFieldEntry(u2 cpi) : + _field_holder(nullptr), + _field_offset(0), + _field_index(0), + _cpool_index(cpi), + _tos_state(0), + _flags(0), + _get_code(0), + _put_code(0) {} + ResolvedFieldEntry() : + ResolvedFieldEntry(0) {} + + // Bit shift to get flags + // Note: Only two flags exist at the moment but more could be added + enum { + is_volatile_shift = 0, + is_final_shift = 1, // unused + }; + + // Getters + InstanceKlass* field_holder() const { return _field_holder; } + int field_offset() const { return _field_offset; } + u2 field_index() const { return _field_index; } + u2 constant_pool_index() const { return _cpool_index; } + u1 tos_state() const { return _tos_state; } + u1 get_code() const { return Atomic::load_acquire(&_get_code); } + u1 put_code() const { return Atomic::load_acquire(&_put_code); } + bool is_final() const { return (_flags & (1 << is_final_shift)) != 0; } + bool is_volatile () const { return (_flags & (1 << is_volatile_shift)) != 0; } + bool is_resolved(Bytecodes::Code code) const { + switch(code) { + case Bytecodes::_getstatic: + case Bytecodes::_getfield: + return (get_code() == code); + case Bytecodes::_putstatic: + case Bytecodes::_putfield: + return (put_code()
== code); + default: + ShouldNotReachHere(); + return false; + } + } + + // Printing + void print_on(outputStream* st) const; + + void set_flags(bool is_final, bool is_volatile) { + u1 new_flags = (static_cast(is_final) << static_cast(is_final_shift)) | static_cast(is_volatile); + _flags = new_flags; + } + + inline void set_bytecode(u1* code, u1 new_code) { + #ifdef ASSERT + // Read once. + volatile Bytecodes::Code c = (Bytecodes::Code)*code; + assert(c == 0 || c == new_code || new_code == 0, "update must be consistent"); + #endif + Atomic::release_store(code, new_code); + } + + // Populate the structure with resolution information + void fill_in(InstanceKlass* klass, intx offset, int index, int tos_state, u1 b1, u1 b2) { + _field_holder = klass; + _field_offset = offset; + _field_index = index; + _tos_state = tos_state; + + // These must be set after the other fields + set_bytecode(&_get_code, b1); + set_bytecode(&_put_code, b2); + } + + // CDS + void remove_unshareable_info(); + + // Offsets + static ByteSize field_holder_offset() { return byte_offset_of(ResolvedFieldEntry, _field_holder); } + static ByteSize field_offset_offset() { return byte_offset_of(ResolvedFieldEntry, _field_offset); } + static ByteSize field_index_offset() { return byte_offset_of(ResolvedFieldEntry, _field_index); } + static ByteSize get_code_offset() { return byte_offset_of(ResolvedFieldEntry, _get_code); } + static ByteSize put_code_offset() { return byte_offset_of(ResolvedFieldEntry, _put_code); } + static ByteSize type_offset() { return byte_offset_of(ResolvedFieldEntry, _tos_state); } + static ByteSize flags_offset() { return byte_offset_of(ResolvedFieldEntry, _flags); } + +}; + +#endif //SHARE_OOPS_RESOLVEDFIELDENTRY_HPP diff --git a/src/hotspot/share/prims/jvmtiClassFileReconstituter.cpp b/src/hotspot/share/prims/jvmtiClassFileReconstituter.cpp index 193ef07ddca79..699c97c713997 100644 --- a/src/hotspot/share/prims/jvmtiClassFileReconstituter.cpp +++
b/src/hotspot/share/prims/jvmtiClassFileReconstituter.cpp @@ -1026,7 +1026,13 @@ void JvmtiClassFileReconstituter::copy_bytecodes(const methodHandle& mh, case Bytecodes::_getstatic : // fall through case Bytecodes::_putstatic : // fall through case Bytecodes::_getfield : // fall through - case Bytecodes::_putfield : // fall through + case Bytecodes::_putfield : { + int field_index = Bytes::get_native_u2(bcp+1); + u2 pool_index = mh->constants()->resolved_field_entry_at(field_index)->constant_pool_index(); + assert(pool_index < mh->constants()->length(), "sanity check"); + Bytes::put_Java_u2((address)(p+1), pool_index); // java byte ordering + break; + } case Bytecodes::_invokevirtual : // fall through case Bytecodes::_invokespecial : // fall through case Bytecodes::_invokestatic : // fall through diff --git a/src/hotspot/share/prims/methodComparator.cpp b/src/hotspot/share/prims/methodComparator.cpp index 478b4911a24e0..1511182a836c3 100644 --- a/src/hotspot/share/prims/methodComparator.cpp +++ b/src/hotspot/share/prims/methodComparator.cpp @@ -88,7 +88,18 @@ bool MethodComparator::args_same(Bytecodes::Code const c_old, Bytecodes::Code c case Bytecodes::_getstatic : // fall through case Bytecodes::_putstatic : // fall through case Bytecodes::_getfield : // fall through - case Bytecodes::_putfield : // fall through + case Bytecodes::_putfield : { + int index_old = s_old->get_index_u2(); + int index_new = s_new->get_index_u2(); + // Check if the names of classes, field/method names and signatures at these indexes + // are the same. Indices which are really into constantpool cache (rather than constant + // pool itself) are accepted by the constantpool query routines below. 
+ if ((old_cp->klass_ref_at_noresolve(index_old, c_old) != new_cp->klass_ref_at_noresolve(index_new, c_old)) || + (old_cp->name_ref_at(index_old, c_old) != new_cp->name_ref_at(index_new, c_old)) || + (old_cp->signature_ref_at(index_old, c_old) != new_cp->signature_ref_at(index_new, c_old))) + return false; + break; + } case Bytecodes::_invokevirtual : // fall through case Bytecodes::_invokespecial : // fall through case Bytecodes::_invokestatic : // fall through diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp index 9303bf4c946ff..e3f7c263bf0f1 100644 --- a/src/hotspot/share/prims/whitebox.cpp +++ b/src/hotspot/share/prims/whitebox.cpp @@ -1885,6 +1885,24 @@ WB_ENTRY(jint, WB_ConstantPoolEncodeIndyIndex(JNIEnv* env, jobject wb, jint inde return ConstantPool::encode_invokedynamic_index(index); WB_END +WB_ENTRY(jint, WB_getFieldEntriesLength(JNIEnv* env, jobject wb, jclass klass)) + InstanceKlass* ik = InstanceKlass::cast(java_lang_Class::as_Klass(JNIHandles::resolve(klass))); + ConstantPool* cp = ik->constants(); + if (cp->cache() == nullptr) { + return -1; + } + return cp->resolved_field_entries_length(); +WB_END + +WB_ENTRY(jint, WB_getFieldCPIndex(JNIEnv* env, jobject wb, jclass klass, jint index)) + InstanceKlass* ik = InstanceKlass::cast(java_lang_Class::as_Klass(JNIHandles::resolve(klass))); + ConstantPool* cp = ik->constants(); + if (cp->cache() == NULL) { + return -1; + } + return cp->resolved_field_entry_at(index)->constant_pool_index(); +WB_END + WB_ENTRY(jint, WB_getIndyInfoLength(JNIEnv* env, jobject wb, jclass klass)) InstanceKlass* ik = InstanceKlass::cast(java_lang_Class::as_Klass(JNIHandles::resolve(klass))); ConstantPool* cp = ik->constants(); @@ -2771,6 +2789,8 @@ static JNINativeMethod methods[] = { CC"(Ljava/lang/Class;I)I", (void*)&WB_ConstantPoolRemapInstructionOperandFromCache}, {CC"encodeConstantPoolIndyIndex0", CC"(I)I", (void*)&WB_ConstantPoolEncodeIndyIndex}, + {CC"getFieldEntriesLength0", 
CC"(Ljava/lang/Class;)I", (void*)&WB_getFieldEntriesLength}, + {CC"getFieldCPIndex0", CC"(Ljava/lang/Class;I)I", (void*)&WB_getFieldCPIndex}, {CC"getIndyInfoLength0", CC"(Ljava/lang/Class;)I", (void*)&WB_getIndyInfoLength}, {CC"getIndyCPIndex0", CC"(Ljava/lang/Class;I)I", (void*)&WB_getIndyCPIndex}, {CC"printClasses0", CC"(Ljava/lang/String;I)Ljava/lang/String;", (void*)&WB_printClasses}, diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp index 0dd92c8b436ea..dbe31bcf34f3f 100644 --- a/src/hotspot/share/runtime/vmStructs.cpp +++ b/src/hotspot/share/runtime/vmStructs.cpp @@ -83,6 +83,7 @@ #include "oops/objArrayOop.hpp" #include "oops/oop.inline.hpp" #include "oops/oopHandle.hpp" +#include "oops/resolvedFieldEntry.hpp" #include "oops/resolvedIndyEntry.hpp" #include "oops/symbol.hpp" #include "oops/typeArrayKlass.hpp" @@ -223,6 +224,8 @@ nonstatic_field(ConstantPoolCache, _reference_map, Array*) \ nonstatic_field(ConstantPoolCache, _length, int) \ nonstatic_field(ConstantPoolCache, _constant_pool, ConstantPool*) \ + nonstatic_field(ConstantPoolCache, _resolved_field_entries, Array*) \ + nonstatic_field(ResolvedFieldEntry, _cpool_index, u2) \ nonstatic_field(ConstantPoolCache, _resolved_indy_entries, Array*) \ nonstatic_field(ResolvedIndyEntry, _cpool_index, u2) \ volatile_nonstatic_field(InstanceKlass, _array_klasses, ObjArrayKlass*) \ @@ -479,6 +482,8 @@ \ nonstatic_field(Array, _length, int) \ nonstatic_field(Array, _data[0], Klass*) \ + nonstatic_field(Array, _length, int) \ + nonstatic_field(Array, _data[0], ResolvedFieldEntry) \ nonstatic_field(Array, _length, int) \ nonstatic_field(Array, _data[0], ResolvedIndyEntry) \ \ @@ -962,14 +967,15 @@ /* Array */ \ /************/ \ \ - nonstatic_field(Array, _length, int) \ - unchecked_nonstatic_field(Array, _data, sizeof(int)) \ - unchecked_nonstatic_field(Array, _data, sizeof(u1)) \ - unchecked_nonstatic_field(Array, _data, sizeof(u2)) \ - unchecked_nonstatic_field(Array, 
_data, sizeof(Method*)) \ - unchecked_nonstatic_field(Array, _data, sizeof(Klass*)) \ - unchecked_nonstatic_field(Array, _data, sizeof(ResolvedIndyEntry)) \ - unchecked_nonstatic_field(Array*>, _data, sizeof(Array*)) \ + nonstatic_field(Array, _length, int) \ + unchecked_nonstatic_field(Array, _data, sizeof(int)) \ + unchecked_nonstatic_field(Array, _data, sizeof(u1)) \ + unchecked_nonstatic_field(Array, _data, sizeof(u2)) \ + unchecked_nonstatic_field(Array, _data, sizeof(Method*)) \ + unchecked_nonstatic_field(Array, _data, sizeof(Klass*)) \ + unchecked_nonstatic_field(Array, _data, sizeof(ResolvedFieldEntry)) \ + unchecked_nonstatic_field(Array, _data, sizeof(ResolvedIndyEntry)) \ + unchecked_nonstatic_field(Array*>, _data, sizeof(Array*)) \ \ /*********************************/ \ /* java_lang_Class fields */ \ @@ -1899,6 +1905,7 @@ declare_type(Array, MetaspaceObj) \ declare_type(Array, MetaspaceObj) \ declare_type(Array, MetaspaceObj) \ + declare_type(Array, MetaspaceObj) \ declare_type(Array, MetaspaceObj) \ declare_type(Array*>, MetaspaceObj) \ \ @@ -1917,6 +1924,7 @@ declare_toplevel_type(RuntimeBlob*) \ declare_toplevel_type(CompressedWriteStream*) \ declare_toplevel_type(ConstantPoolCacheEntry) \ + declare_toplevel_type(ResolvedFieldEntry) \ declare_toplevel_type(ResolvedIndyEntry) \ declare_toplevel_type(elapsedTimer) \ declare_toplevel_type(frame) \ diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ConstantPool.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ConstantPool.java index cec1849cf7bc0..bde0ca6e05457 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ConstantPool.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ConstantPool.java @@ -267,8 +267,7 @@ public int to_cp_index(int index, int code) { case Bytecodes._getstatic: case Bytecodes._putfield: case Bytecodes._putstatic: - // TODO: handle resolved field entries with new structure - // i = .... 
+ return getCache().getFieldEntryAt(index).getConstantPoolIndex(); case Bytecodes._invokeinterface: case Bytecodes._invokehandle: case Bytecodes._invokespecial: diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ConstantPoolCache.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ConstantPoolCache.java index 5e8379559aced..0b54f71d2d5cd 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ConstantPoolCache.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ConstantPoolCache.java @@ -55,6 +55,7 @@ private static synchronized void initialize(TypeDataBase db) throws WrongTypeExc intSize = VM.getVM().getObjectHeap().getIntSize(); resolvedReferences = type.getAddressField("_resolved_references"); referenceMap = type.getAddressField("_reference_map"); + resolvedFieldArray = type.getAddressField("_resolved_field_entries"); resolvedIndyArray = type.getAddressField("_resolved_indy_entries"); } @@ -72,6 +73,7 @@ public ConstantPoolCache(Address addr) { private static long intSize; private static AddressField resolvedReferences; private static AddressField referenceMap; + private static AddressField resolvedFieldArray; private static AddressField resolvedIndyArray; public ConstantPool getConstants() { return (ConstantPool) constants.getValue(this); } @@ -91,6 +93,12 @@ public ResolvedIndyEntry getIndyEntryAt(int i) { return array.getAt(i); } + public ResolvedFieldEntry getFieldEntryAt(int i) { + Address addr = resolvedFieldArray.getValue(getAddress()); + ResolvedFieldArray array = new ResolvedFieldArray(addr); + return array.getAt(i); + } + public int getIntAt(int entry, int fld) { long offset = baseOffset + entry * elementSize + fld * intSize; return (int) getAddress().getCIntegerAt(offset, intSize, true ); diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ResolvedFieldArray.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ResolvedFieldArray.java new file mode 100644 index 
0000000000000..3e606a9fbab16 --- /dev/null +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ResolvedFieldArray.java @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + + package sun.jvm.hotspot.oops; + + import sun.jvm.hotspot.debugger.Address; + import sun.jvm.hotspot.runtime.VM; + import sun.jvm.hotspot.types.Type; + import sun.jvm.hotspot.types.TypeDataBase; + import sun.jvm.hotspot.types.WrongTypeException; + import sun.jvm.hotspot.utilities.GenericArray; + import sun.jvm.hotspot.utilities.Observable; + import sun.jvm.hotspot.utilities.Observer; + + public class ResolvedFieldArray extends GenericArray { + static { + VM.registerVMInitializedObserver(new Observer() { + public void update(Observable o, Object data) { + initialize(VM.getVM().getTypeDataBase()); + } + }); + } + + private static synchronized void initialize(TypeDataBase db) throws WrongTypeException { + elemType = db.lookupType("ResolvedFieldEntry"); + + Type type = db.lookupType("Array"); + dataFieldOffset = type.getAddressField("_data").getOffset(); + } + + private static long dataFieldOffset; + protected static Type elemType; + + public ResolvedFieldArray(Address addr) { + super(addr, dataFieldOffset); + } + + public ResolvedFieldEntry getAt(int index) { + if (index < 0 || index >= length()) throw new ArrayIndexOutOfBoundsException(index + " " + length()); + + Type elemType = getElemType(); + + Address data = getAddress().addOffsetTo(dataFieldOffset); + long elemSize = elemType.getSize(); + + return new ResolvedFieldEntry(data.addOffsetTo(index* elemSize)); + } + + public Type getElemType() { + return elemType; + } + } diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ResolvedFieldEntry.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ResolvedFieldEntry.java new file mode 100644 index 0000000000000..fee97f97f4d9a --- /dev/null +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ResolvedFieldEntry.java @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + + package sun.jvm.hotspot.oops; + + import java.util.*; + import sun.jvm.hotspot.debugger.*; + import sun.jvm.hotspot.runtime.*; + import sun.jvm.hotspot.types.*; + import sun.jvm.hotspot.utilities.*; + import sun.jvm.hotspot.utilities.Observable; + import sun.jvm.hotspot.utilities.Observer; + + public class ResolvedFieldEntry extends VMObject { + private static long size; + private static long baseOffset; + private static CIntegerField cpIndex; + + static { + VM.registerVMInitializedObserver(new Observer() { + public void update(Observable o, Object data) { + initialize(VM.getVM().getTypeDataBase()); + } + }); + } + + private static synchronized void initialize(TypeDataBase db) throws WrongTypeException { + Type type = db.lookupType("ResolvedFieldEntry"); + size = type.getSize(); + + cpIndex = type.getCIntegerField("_cpool_index"); + } + + ResolvedFieldEntry(Address addr) { + super(addr); + } + + public int getConstantPoolIndex() { + return this.getAddress().getJShortAt(cpIndex.getOffset()); + } + + public void iterateFields(MetadataVisitor visitor) 
{ } + } diff --git a/test/hotspot/gtest/oops/test_cpCache_output.cpp b/test/hotspot/gtest/oops/test_cpCache_output.cpp index ad5e420cdf4db..8d5015e6eebbe 100644 --- a/test/hotspot/gtest/oops/test_cpCache_output.cpp +++ b/test/hotspot/gtest/oops/test_cpCache_output.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,6 +61,12 @@ TEST_VM(ConstantPoolCache, print_on) { ASSERT_TRUE(strstr(output, "num parameters:") != NULL) << "must have number of parameters"; // field entry test - ASSERT_TRUE(strstr(output, "volatile:") != NULL) << "must have volatile flag"; - ASSERT_TRUE(strstr(output, "field index:") != NULL) << "must have field index"; + ASSERT_TRUE(strstr(output, "Offset:") != NULL) << "must have field offset"; + ASSERT_TRUE(strstr(output, "Field Index:") != NULL) << "must have field index"; + ASSERT_TRUE(strstr(output, "CP Index:") != NULL) << "must have constant pool index"; + ASSERT_TRUE(strstr(output, "TOS:") != NULL) << "must have type"; + ASSERT_TRUE(strstr(output, "Is Final:") != NULL) << "must have final flag"; + ASSERT_TRUE(strstr(output, "Is Volatile:") != NULL) << "must have volatile flag"; + ASSERT_TRUE(strstr(output, "Put Bytecode:") != NULL) << "must have \"put code\""; + ASSERT_TRUE(strstr(output, "Get Bytecode:") != NULL) << "must have \"get code\""; } diff --git a/test/hotspot/jtreg/compiler/jvmci/compilerToVM/ConstantPoolTestsHelper.java b/test/hotspot/jtreg/compiler/jvmci/compilerToVM/ConstantPoolTestsHelper.java index a751c2eee93bf..b2fb57f0a878e 100644 --- a/test/hotspot/jtreg/compiler/jvmci/compilerToVM/ConstantPoolTestsHelper.java +++ b/test/hotspot/jtreg/compiler/jvmci/compilerToVM/ConstantPoolTestsHelper.java @@ -84,6 +84,13 @@ public int getCPCacheIndex(int cpi) { } } } + if 
(constantPoolSS.getTagAt(cpi).equals(Tag.FIELDREF)) { + for (int field_index = 0; field_index < WB.getFieldEntriesLength(this.klass); field_index++) { + if (WB.getFieldCPIndex(this.klass, field_index) == cpi) { + return field_index; + } + } + } int cacheLength = WB.getConstantPoolCacheLength(this.klass); int indexTag = WB.getConstantPoolCacheIndexTag(); for (int cpci = indexTag; cpci < cacheLength + indexTag; cpci++) { diff --git a/test/hotspot/jtreg/runtime/interpreter/BytecodeTracerTest.java b/test/hotspot/jtreg/runtime/interpreter/BytecodeTracerTest.java index 6e43318e773f6..289b243f6e18a 100644 --- a/test/hotspot/jtreg/runtime/interpreter/BytecodeTracerTest.java +++ b/test/hotspot/jtreg/runtime/interpreter/BytecodeTracerTest.java @@ -172,7 +172,8 @@ public static void main(String args[]) throws Exception { .printClasses("BytecodeTracerTest$Linked", 0xff) .mustMatch("invokedynamic bsm=[0-9]+ [0-9]+ ") .mustMatch("BSM: REF_invokeStatic [0-9]+ "); test("invokedynamic in unlinked class") .printUnlinkedMethods("toString") diff --git a/test/lib/jdk/test/whitebox/WhiteBox.java b/test/lib/jdk/test/whitebox/WhiteBox.java index 66cd7567af0cc..77b8c7d5ddfcc 100644 --- a/test/lib/jdk/test/whitebox/WhiteBox.java +++ b/test/lib/jdk/test/whitebox/WhiteBox.java @@ -151,6 +151,18 @@ public int encodeConstantPoolIndyIndex(int index) { return encodeConstantPoolIndyIndex0(index); } + private native int getFieldEntriesLength0(Class aClass); + public int getFieldEntriesLength(Class aClass) { + Objects.requireNonNull(aClass); + return getFieldEntriesLength0(aClass); + } + + private native int getFieldCPIndex0(Class aClass, int index); + public int getFieldCPIndex(Class aClass, int index) { + Objects.requireNonNull(aClass); + return getFieldCPIndex0(aClass, index); + } + private native int getIndyInfoLength0(Class aClass); public int getIndyInfoLength(Class aClass) { Objects.requireNonNull(aClass);