From 2da0d1ab8de08ee6733b8abae6574fd9fa7ce131 Mon Sep 17 00:00:00 2001 From: Fredrik Bredberg Date: Fri, 12 Sep 2025 10:30:48 +0200 Subject: [PATCH 1/3] 8365191: Cleanup after removing LockingMode related code --- .../cpu/aarch64/c1_CodeStubs_aarch64.cpp | 8 +++---- .../cpu/aarch64/c1_LIRAssembler_aarch64.cpp | 4 +--- .../cpu/aarch64/c1_MacroAssembler_aarch64.cpp | 14 +++++------ .../cpu/aarch64/c1_MacroAssembler_aarch64.hpp | 22 ++++++++--------- .../cpu/aarch64/sharedRuntime_aarch64.cpp | 3 --- src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp | 7 +++--- src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp | 3 +-- src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp | 24 +++++++++---------- src/hotspot/cpu/arm/c1_MacroAssembler_arm.hpp | 6 ++--- src/hotspot/cpu/arm/sharedRuntime_arm.cpp | 10 ++++---- src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp | 7 +++--- src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp | 4 +--- src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp | 8 +++---- .../cpu/riscv/c1_LIRAssembler_riscv.cpp | 4 +--- .../cpu/riscv/c1_MacroAssembler_riscv.cpp | 14 +++++------ .../cpu/riscv/c1_MacroAssembler_riscv.hpp | 20 ++++++++-------- src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp | 2 -- src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp | 10 ++++---- src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp | 4 +--- src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp | 8 +++---- src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp | 4 +--- src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp | 18 +++++++------- src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp | 18 +++++++------- src/hotspot/share/c1/c1_CodeStubs.hpp | 11 +++------ src/hotspot/share/c1/c1_LIRGenerator.cpp | 2 +- src/hotspot/share/jvmci/vmStructs_jvmci.cpp | 2 +- src/hotspot/share/runtime/basicLock.cpp | 2 +- src/hotspot/share/runtime/basicLock.hpp | 16 ++++++------- .../share/runtime/basicLock.inline.hpp | 8 +++---- src/hotspot/share/runtime/deoptimization.cpp | 2 +- .../share/runtime/synchronizer.inline.hpp | 2 +- src/hotspot/share/runtime/vmStructs.cpp | 2 +- .../classes/sun/jvm/hotspot/oops/Mark.java | 8 +------ .../sun/jvm/hotspot/runtime/BasicLock.java | 14 +---------- 34 files changed, 125 insertions(+), 166 deletions(-) diff --git a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp index 954e4abee14ee..9bf466785352c 100644 --- a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp @@ -216,10 +216,10 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) { void MonitorExitStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); - if (_compute_lock) { - // lock_reg was destroyed by fast unlocking attempt => recompute it - ce->monitor_address(_monitor_ix, _lock_reg); - } + + // lock_reg was destroyed by fast unlocking attempt => recompute it + ce->monitor_address(_monitor_ix, _lock_reg); + ce->store_parameter(_lock_reg->as_register(), 0); // note: non-blocking leaf routine => no call info needed StubId exit_id; diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp index e9bb2350b5b2d..c7240ccd9290e 100644 --- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp @@ -409,7 +409,7 @@ int LIR_Assembler::emit_unwind_handler() { MonitorExitStub* stub = nullptr; if (method()->is_synchronized()) { monitor_address(0, FrameMap::r0_opr); - stub = new MonitorExitStub(FrameMap::r0_opr, true, 0); + stub = new MonitorExitStub(FrameMap::r0_opr, 0); __ unlock_object(r5, r4, r0, r6, 
*stub->entry()); __ bind(*stub->continuation()); } @@ -2481,7 +2481,6 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) { Register lock = op->lock_opr()->as_register(); Register temp = op->scratch_opr()->as_register(); if (op->code() == lir_lock) { - assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); // add debug info for NullPointerException only if one is possible int null_check_offset = __ lock_object(hdr, obj, lock, temp, *op->stub()->entry()); if (op->info() != nullptr) { @@ -2489,7 +2488,6 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) { } // done } else if (op->code() == lir_unlock) { - assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); __ unlock_object(hdr, obj, lock, temp, *op->stub()->entry()); } else { Unimplemented(); diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp index 8a79274b2ff65..31c36e749c596 100644 --- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp @@ -59,28 +59,28 @@ void C1_MacroAssembler::float_cmp(bool is_float, int unordered_result, } } -int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register temp, Label& slow_case) { - assert_different_registers(hdr, obj, disp_hdr, temp, rscratch2); +int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register basic_lock, Register temp, Label& slow_case) { + assert_different_registers(hdr, obj, basic_lock, temp, rscratch2); int null_check_offset = -1; verify_oop(obj); // save object being locked into the BasicObjectLock - str(obj, Address(disp_hdr, BasicObjectLock::obj_offset())); + str(obj, Address(basic_lock, BasicObjectLock::obj_offset())); null_check_offset = offset(); - lightweight_lock(disp_hdr, obj, hdr, temp, rscratch2, slow_case); + lightweight_lock(basic_lock, obj, hdr, temp, rscratch2, slow_case); return null_check_offset; } -void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Register temp, Label& slow_case) { - assert_different_registers(hdr, obj, disp_hdr, temp, rscratch2); +void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register basic_lock, Register temp, Label& slow_case) { + assert_different_registers(hdr, obj, basic_lock, temp, rscratch2); // load object - ldr(obj, Address(disp_hdr, BasicObjectLock::obj_offset())); + ldr(obj, Address(basic_lock, BasicObjectLock::obj_offset())); verify_oop(obj); lightweight_unlock(obj, hdr, temp, rscratch2, slow_case); diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp index fc8e83d706b50..7b181b104c10f 100644 --- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -55,19 +55,19 @@ using MacroAssembler::null_check; Register result); // locking - // hdr : must be r0, contents destroyed - // obj : must point to the object to lock, contents preserved - // disp_hdr: must point to the displaced header location, contents preserved - // temp : temporary register, must not be rscratch1 or rscratch2 + // hdr : must be r0, contents destroyed + // obj : must point to the object to lock, contents preserved + // basic_lock: must point to the basic lock, contents preserved + // temp : temporary register, must not be rscratch1 or rscratch2 // returns code offset at which to add null check debug information - int lock_object (Register swap, Register obj, Register disp_hdr, Register temp, Label& slow_case); + int lock_object (Register swap, Register obj, Register basic_lock, Register temp, Label& slow_case); // unlocking - // hdr : contents destroyed - // obj : must point to the object to lock, contents preserved - // disp_hdr: must be r0 & must point to the displaced header location, contents destroyed - // temp : temporary register, must not be rscratch1 or rscratch2 - void unlock_object(Register swap, Register obj, Register lock, Register temp, Label& slow_case); + // hdr : contents destroyed + // obj : must point to the object to lock, contents preserved + // basic_lock: must be r0 & must point to the basic lock, contents destroyed + // temp : temporary register, must not be rscratch1 or rscratch2 + void unlock_object(Register swap, Register obj, Register basic_lock, Register temp, Label& slow_case); void initialize_object( Register obj, // result: pointer to object after successful allocation diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp index 748c3e8fb11f5..cff8e62fc72ef 100644 --- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp @@ -1763,9 +1763,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, Label lock_done; if (method->is_synchronized()) { - Label count; - const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes(); - // Get the handle (the 2nd argument) __ mov(oop_handle_reg, c_rarg1); diff --git a/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp b/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp index efdc190f09a19..8e49cfcbcaa38 100644 --- a/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp +++ b/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp @@ -200,9 +200,10 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) { void MonitorExitStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); - if (_compute_lock) { - ce->monitor_address(_monitor_ix, _lock_reg); - } + + // lock_reg was destroyed by fast unlocking attempt => recompute it + ce->monitor_address(_monitor_ix, _lock_reg); + const Register lock_reg = _lock_reg->as_pointer_register(); ce->verify_reserved_argument_area_size(1); diff --git a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp index c3b91e8c76f5d..ed9345837f1fd 100644 --- a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp +++ b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp @@ -245,7 +245,7 @@ int LIR_Assembler::emit_unwind_handler() { MonitorExitStub* stub = nullptr; if (method()->is_synchronized()) { monitor_address(0, FrameMap::R0_opr); - stub = new MonitorExitStub(FrameMap::R0_opr, true, 0); + stub = new MonitorExitStub(FrameMap::R0_opr, 0); __ unlock_object(R2, R1, R0, *stub->entry()); __ bind(*stub->continuation()); } @@ -2427,7 +2427,6 @@ void 
LIR_Assembler::emit_lock(LIR_OpLock* op) { Register lock = op->lock_opr()->as_pointer_register(); if (op->code() == lir_lock) { - assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry()); if (op->info() != nullptr) { add_debug_info_for_null_check(null_check_offset, op->info()); diff --git a/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp b/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp index f2b082697500d..ca7711353d2a4 100644 --- a/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp +++ b/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp @@ -176,17 +176,17 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, initialize_object(obj, tmp1, klass, len, tmp2, tmp3, header_size_in_bytes, -1, /* is_tlab_allocated */ UseTLAB); } -int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) { +int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register basic_lock, Label& slow_case) { int null_check_offset = 0; const Register tmp2 = Rtemp; // Rtemp should be free at c1 LIR level - assert_different_registers(hdr, obj, disp_hdr, tmp2); + assert_different_registers(hdr, obj, basic_lock, tmp2); assert(BasicObjectLock::lock_offset() == 0, "adjust this code"); assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions"); // save object being locked into the BasicObjectLock - str(obj, Address(disp_hdr, BasicObjectLock::obj_offset())); + str(obj, Address(basic_lock, BasicObjectLock::obj_offset())); null_check_offset = offset(); @@ -197,26 +197,26 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr b(slow_case, ne); } - Register t1 = disp_hdr; // Needs saving, probably - Register t2 = hdr; // blow - Register t3 = Rtemp; // blow + Register t1 = basic_lock; // Needs saving, probably + Register t2 = hdr; // blow + Register t3 = Rtemp; // blow lightweight_lock(obj, t1, t2, t3, 1 /* savemask - save t1 */, slow_case); // Success: fall through return null_check_offset; } -void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) { - assert_different_registers(hdr, obj, disp_hdr, Rtemp); +void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register basic_lock, Label& slow_case) { + assert_different_registers(hdr, obj, basic_lock, Rtemp); assert(BasicObjectLock::lock_offset() == 0, "adjust this code"); assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions"); - ldr(obj, Address(disp_hdr, BasicObjectLock::obj_offset())); + ldr(obj, Address(basic_lock, BasicObjectLock::obj_offset())); - Register t1 = disp_hdr; // Needs saving, probably - Register t2 = hdr; // blow - Register t3 = Rtemp; // blow + Register t1 = basic_lock; // Needs saving, probably + Register t2 = hdr; // blow + Register t3 = Rtemp; // blow lightweight_unlock(obj, t1, t2, t3, 1 /* savemask - save t1 */, slow_case); // Success: fall through diff --git a/src/hotspot/cpu/arm/c1_MacroAssembler_arm.hpp b/src/hotspot/cpu/arm/c1_MacroAssembler_arm.hpp index 0a626822a9b6e..fd88b6c4fe92d 100644 --- a/src/hotspot/cpu/arm/c1_MacroAssembler_arm.hpp +++ b/src/hotspot/cpu/arm/c1_MacroAssembler_arm.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -59,9 +59,9 @@ max_array_allocation_length = 0x01000000 }; - int lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case); + int lock_object(Register hdr, Register obj, Register basic_lock, Label& slow_case); - void unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case); + void unlock_object(Register hdr, Register obj, Register basic_lock, Label& slow_case); // This platform only uses signal-based null checks. The Label is not needed. void null_check(Register r, Label *Lnull = nullptr) { MacroAssembler::null_check(r); } diff --git a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp index dcf631525ab34..42ee43aaae998 100644 --- a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp +++ b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp @@ -1129,7 +1129,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, const Register sync_handle = R5; const Register sync_obj = R6; - const Register disp_hdr = altFP_7_11; + const Register basic_lock = altFP_7_11; const Register tmp = R8; Label slow_lock, lock_done, fast_lock; @@ -1140,7 +1140,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, __ mov(sync_handle, R1); log_trace(fastlock)("SharedRuntime lock fast"); - __ lightweight_lock(sync_obj /* object */, disp_hdr /* t1 */, tmp /* t2 */, Rtemp /* t3 */, + __ lightweight_lock(sync_obj /* object */, basic_lock /* t1 */, tmp /* t2 */, Rtemp /* t3 */, 0x7 /* savemask */, slow_lock); // Fall through to lock_done __ bind(lock_done); @@ -1255,7 +1255,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, // last_Java_frame is already set, so do call_VM manually; no exception can occur __ mov(R0, sync_obj); - __ mov(R1, disp_hdr); + __ mov(R1, basic_lock); __ mov(R2, Rthread); __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C)); @@ -1270,12 +1270,12 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, // Clear pending exception before reentering VM. // Can store the oop in register since it is a leaf call. - assert_different_registers(Rtmp_save1, sync_obj, disp_hdr); + assert_different_registers(Rtmp_save1, sync_obj, basic_lock); __ ldr(Rtmp_save1, Address(Rthread, Thread::pending_exception_offset())); Register zero = __ zero_register(Rtemp); __ str(zero, Address(Rthread, Thread::pending_exception_offset())); __ mov(R0, sync_obj); - __ mov(R1, disp_hdr); + __ mov(R1, basic_lock); __ mov(R2, Rthread); __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)); __ str(Rtmp_save1, Address(Rthread, Thread::pending_exception_offset())); diff --git a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp index 438521b0a9b54..61780a739693d 100644 --- a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp +++ b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp @@ -268,9 +268,10 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) { void MonitorExitStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); - if (_compute_lock) { - ce->monitor_address(_monitor_ix, _lock_reg); - } + + // lock_reg was destroyed by fast unlocking attempt => recompute it + ce->monitor_address(_monitor_ix, _lock_reg); + address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? 
StubId::c1_monitorexit_id : StubId::c1_monitorexit_nofpu_id); //__ load_const_optimized(R0, stub); __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub)); diff --git a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp index 73509c2213461..2edfd5d72249b 100644 --- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp @@ -227,7 +227,7 @@ int LIR_Assembler::emit_unwind_handler() { MonitorExitStub* stub = nullptr; if (method()->is_synchronized()) { monitor_address(0, FrameMap::R4_opr); - stub = new MonitorExitStub(FrameMap::R4_opr, true, 0); + stub = new MonitorExitStub(FrameMap::R4_opr, 0); __ unlock_object(R5, R6, R4, *stub->entry()); __ bind(*stub->continuation()); } @@ -2614,7 +2614,6 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) { // Obj may not be an oop. if (op->code() == lir_lock) { MonitorEnterStub* stub = (MonitorEnterStub*)op->stub(); - assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); // Add debug info for NullPointerException only if one is possible. if (op->info() != nullptr) { if (!os::zero_page_read_protected() || !ImplicitNullChecks) { @@ -2626,7 +2625,6 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) { __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry()); } else { assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock"); - assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); __ unlock_object(hdr, obj, lock, *op->stub()->entry()); } __ bind(*op->stub()->continuation()); diff --git a/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp b/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp index 0c16e632e5af0..a8a21342248c3 100644 --- a/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp @@ -204,10 +204,10 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) { void MonitorExitStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); - if (_compute_lock) { - // lock_reg was destroyed by fast unlocking attempt => recompute it - ce->monitor_address(_monitor_ix, _lock_reg); - } + + // lock_reg was destroyed by fast unlocking attempt => recompute it + ce->monitor_address(_monitor_ix, _lock_reg); + ce->store_parameter(_lock_reg->as_register(), 0); // note: non-blocking leaf routine => no call info needed StubId exit_id; diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp index f60be85a141d6..302b1e540ce91 100644 --- a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp @@ -338,7 +338,7 @@ int LIR_Assembler::emit_unwind_handler() { MonitorExitStub* stub = nullptr; if (method()->is_synchronized()) { monitor_address(0, FrameMap::r10_opr); - stub = new MonitorExitStub(FrameMap::r10_opr, true, 0); + stub = new MonitorExitStub(FrameMap::r10_opr, 0); __ unlock_object(x15, x14, x10, x16, *stub->entry()); __ bind(*stub->continuation()); } @@ -1494,14 +1494,12 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) { Register lock = op->lock_opr()->as_register(); Register temp = op->scratch_opr()->as_register(); if (op->code() == lir_lock) { - assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); // add debug info for NullPointerException only if one is possible int null_check_offset = __ lock_object(hdr, obj, lock, temp, *op->stub()->entry()); if 
(op->info() != nullptr) { add_debug_info_for_null_check(null_check_offset, op->info()); } } else if (op->code() == lir_unlock) { - assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); __ unlock_object(hdr, obj, lock, temp, *op->stub()->entry()); } else { Unimplemented(); diff --git a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp index 8198192f50634..8e989de26650a 100644 --- a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp +++ b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp @@ -48,27 +48,27 @@ void C1_MacroAssembler::float_cmp(bool is_float, int unordered_result, } } -int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register temp, Label& slow_case) { - assert_different_registers(hdr, obj, disp_hdr, temp, t0, t1); +int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register basic_lock, Register temp, Label& slow_case) { + assert_different_registers(hdr, obj, basic_lock, temp, t0, t1); int null_check_offset = -1; verify_oop(obj); // save object being locked into the BasicObjectLock - sd(obj, Address(disp_hdr, BasicObjectLock::obj_offset())); + sd(obj, Address(basic_lock, BasicObjectLock::obj_offset())); null_check_offset = offset(); - lightweight_lock(disp_hdr, obj, hdr, temp, t1, slow_case); + lightweight_lock(basic_lock, obj, hdr, temp, t1, slow_case); return null_check_offset; } -void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Register temp, Label& slow_case) { - assert_different_registers(hdr, obj, disp_hdr, temp, t0, t1); +void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register basic_lock, Register temp, Label& slow_case) { + assert_different_registers(hdr, obj, basic_lock, temp, t0, t1); // load object - ld(obj, Address(disp_hdr, BasicObjectLock::obj_offset())); + ld(obj, Address(basic_lock, BasicObjectLock::obj_offset())); verify_oop(obj); lightweight_unlock(obj, hdr, temp, t1, slow_case); diff --git a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.hpp index 561053045ec77..040b70fb0797f 100644 --- a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.hpp +++ b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
@@ -56,18 +56,18 @@ using MacroAssembler::null_check; Register result); // locking - // hdr : must be x10, contents destroyed - // obj : must point to the object to lock, contents preserved - // disp_hdr: must point to the displaced header location, contents preserved - // temp : temporary register, must not be scratch register t0 or t1 + // hdr : must be x10, contents destroyed + // obj : must point to the object to lock, contents preserved + // basic_lock: must point to the basic_lock, contents preserved + // temp : temporary register, must not be scratch register t0 or t1 // returns code offset at which to add null check debug information - int lock_object(Register swap, Register obj, Register disp_hdr, Register temp, Label& slow_case); + int lock_object(Register swap, Register obj, Register basic_lock, Register temp, Label& slow_case); // unlocking - // hdr : contents destroyed - // obj : must point to the object to lock, contents preserved - // disp_hdr: must be x10 & must point to the displaced header location, contents destroyed - // temp : temporary register, must not be scratch register t0 or t1 + // hdr : contents destroyed + // obj : must point to the object to lock, contents preserved + // basic_lock: must be x10 & must point to the basic lock, contents destroyed + // temp : temporary register, must not be scratch register t0 or t1 void unlock_object(Register swap, Register obj, Register lock, Register temp, Label& slow_case); void initialize_object( diff --git a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp index 2cc1fb8eeff40..6c5a573d650a4 100644 --- a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp +++ b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp @@ -1679,8 +1679,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm, Label lock_done; if (method->is_synchronized()) { - const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes(); - // Get the handle (the 2nd argument) __ mv(oop_handle_reg, c_rarg1); diff --git a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp index 68e7114b3b685..f1272ee1a2219 100644 --- a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp +++ b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp @@ -234,12 +234,10 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) { void MonitorExitStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); // Move address of the BasicObjectLock into Z_R1_scratch. - if (_compute_lock) { - // Lock_reg was destroyed by fast unlocking attempt => recompute it. - ce->monitor_address(_monitor_ix, FrameMap::as_opr(Z_R1_scratch)); - } else { - __ lgr_if_needed(Z_R1_scratch, _lock_reg->as_register()); - } + + // Lock_reg was destroyed by fast unlocking attempt => recompute it. + ce->monitor_address(_monitor_ix, FrameMap::as_opr(Z_R1_scratch)); + // Note: non-blocking leaf routine => no call info needed. StubId exit_id; if (ce->compilation()->has_fpu_code()) { diff --git a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp index 87dc8b9286d8f..836ac28c898ee 100644 --- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp @@ -228,7 +228,7 @@ int LIR_Assembler::emit_unwind_handler() { // StubId::c1_monitorexit_id expects lock address in Z_R1_scratch. 
LIR_Opr lock = FrameMap::as_opr(Z_R1_scratch); monitor_address(0, lock); - stub = new MonitorExitStub(lock, true, 0); + stub = new MonitorExitStub(lock, 0); __ unlock_object(Rtmp1, Rtmp2, lock->as_register(), *stub->entry()); __ bind(*stub->continuation()); } @@ -2711,7 +2711,6 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) { Register hdr = op->hdr_opr()->as_register(); Register lock = op->lock_opr()->as_register(); if (op->code() == lir_lock) { - assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); // Add debug info for NullPointerException only if one is possible. if (op->info() != nullptr) { add_debug_info_for_null_check_here(op->info()); @@ -2719,7 +2718,6 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) { __ lock_object(hdr, obj, lock, *op->stub()->entry()); // done } else if (op->code() == lir_unlock) { - assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); __ unlock_object(hdr, obj, lock, *op->stub()->entry()); } else { ShouldNotReachHere(); diff --git a/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp b/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp index 2fd067a7749d0..95ce48f34db73 100644 --- a/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp +++ b/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp @@ -207,10 +207,10 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) { void MonitorExitStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); - if (_compute_lock) { - // lock_reg was destroyed by fast unlocking attempt => recompute it - ce->monitor_address(_monitor_ix, _lock_reg); - } + + // lock_reg was destroyed by fast unlocking attempt => recompute it + ce->monitor_address(_monitor_ix, _lock_reg); + ce->store_parameter(_lock_reg->as_register(), 0); // note: non-blocking leaf routine => no call info needed StubId exit_id; diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp index a30bbe08c55dd..9cc41c556eec0 100644 --- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp @@ -412,7 +412,7 @@ int LIR_Assembler::emit_unwind_handler() { MonitorExitStub* stub = nullptr; if (method()->is_synchronized()) { monitor_address(0, FrameMap::rax_opr); - stub = new MonitorExitStub(FrameMap::rax_opr, true, 0); + stub = new MonitorExitStub(FrameMap::rax_opr, 0); __ unlock_object(rdi, rsi, rax, *stub->entry()); __ bind(*stub->continuation()); } @@ -2730,7 +2730,6 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) { Register hdr = op->hdr_opr()->as_register(); Register lock = op->lock_opr()->as_register(); if (op->code() == lir_lock) { - assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); Register tmp = op->scratch_opr()->as_register(); // add debug info for NullPointerException only if one is possible int null_check_offset = __ lock_object(hdr, obj, lock, tmp, *op->stub()->entry()); @@ -2739,7 +2738,6 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) { } // done } else if (op->code() == lir_unlock) { - assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); __ unlock_object(hdr, obj, lock, *op->stub()->entry()); } else { Unimplemented(); diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp index 36efeafa94062..c3d45f9d15dc3 100644 --- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp @@ -41,32 +41,32 @@ #include 
"utilities/checkedCast.hpp" #include "utilities/globalDefinitions.hpp" -int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register tmp, Label& slow_case) { +int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register basic_lock, Register tmp, Label& slow_case) { assert(hdr == rax, "hdr must be rax, for the cmpxchg instruction"); - assert_different_registers(hdr, obj, disp_hdr, tmp); + assert_different_registers(hdr, obj, basic_lock, tmp); int null_check_offset = -1; verify_oop(obj); // save object being locked into the BasicObjectLock - movptr(Address(disp_hdr, BasicObjectLock::obj_offset()), obj); + movptr(Address(basic_lock, BasicObjectLock::obj_offset()), obj); null_check_offset = offset(); - lightweight_lock(disp_hdr, obj, hdr, tmp, slow_case); + lightweight_lock(basic_lock, obj, hdr, tmp, slow_case); return null_check_offset; } -void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) { - assert(disp_hdr == rax, "disp_hdr must be rax, for the cmpxchg instruction"); - assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different"); +void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register basic_lock, Label& slow_case) { + assert(basic_lock == rax, "basic_lock must be rax, for the cmpxchg instruction"); + assert(hdr != obj && hdr != basic_lock && obj != basic_lock, "registers must be different"); // load object - movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset())); + movptr(obj, Address(basic_lock, BasicObjectLock::obj_offset())); verify_oop(obj); - lightweight_unlock(obj, disp_hdr, hdr, slow_case); + lightweight_unlock(obj, rax, hdr, slow_case); } diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp index 6344a7b6ef19e..f33e47aadb3af 100644 --- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp +++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -46,17 +46,17 @@ void initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register t1); // locking - // hdr : must be rax, contents destroyed - // obj : must point to the object to lock, contents preserved - // disp_hdr: must point to the displaced header location, contents preserved + // hdr : must be rax, contents destroyed + // obj : must point to the object to lock, contents preserved + // basic_lock: must point to the basic lock, contents preserved // returns code offset at which to add null check debug information - int lock_object (Register swap, Register obj, Register disp_hdr, Register tmp, Label& slow_case); + int lock_object (Register swap, Register obj, Register basic_lock, Register tmp, Label& slow_case); // unlocking - // hdr : contents destroyed - // obj : must point to the object to lock, contents preserved - // disp_hdr: must be eax & must point to the displaced header location, contents destroyed - void unlock_object(Register swap, Register obj, Register lock, Label& slow_case); + // hdr : contents destroyed + // obj : must point to the object to lock, contents preserved + // basic_lock: must be eax & must point to the basic lock, contents destroyed + void unlock_object(Register swap, Register obj, Register basic_lock, Label& slow_case); void initialize_object( Register obj, // result: pointer to object after successful allocation diff --git a/src/hotspot/share/c1/c1_CodeStubs.hpp b/src/hotspot/share/c1/c1_CodeStubs.hpp index 5d1c51bdbbf43..a02368487c5a2 100644 --- a/src/hotspot/share/c1/c1_CodeStubs.hpp +++ b/src/hotspot/share/c1/c1_CodeStubs.hpp @@ -371,21 +371,16 @@ class MonitorEnterStub: public MonitorAccessStub { class MonitorExitStub: public MonitorAccessStub { private: - bool _compute_lock; int _monitor_ix; public: - MonitorExitStub(LIR_Opr lock_reg, bool compute_lock, int monitor_ix) + MonitorExitStub(LIR_Opr lock_reg, int monitor_ix) : MonitorAccessStub(LIR_OprFact::illegalOpr, lock_reg), - _compute_lock(compute_lock), _monitor_ix(monitor_ix) { } + _monitor_ix(monitor_ix) { } virtual void emit_code(LIR_Assembler* e); virtual void visit(LIR_OpVisitState* visitor) { assert(_obj_reg->is_illegal(), "unused"); - if (_compute_lock) { - visitor->do_temp(_lock_reg); - } else { - visitor->do_input(_lock_reg); - } + visitor->do_temp(_lock_reg); } #ifndef PRODUCT virtual void print_name(outputStream* out) const { out->print("MonitorExitStub"); } diff --git a/src/hotspot/share/c1/c1_LIRGenerator.cpp b/src/hotspot/share/c1/c1_LIRGenerator.cpp index b30667dcac311..66adfa5ed66a1 100644 --- a/src/hotspot/share/c1/c1_LIRGenerator.cpp +++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp @@ -635,7 +635,7 @@ void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, L // setup registers LIR_Opr hdr = lock; lock = new_hdr; - CodeStub* slow_path = new MonitorExitStub(lock, true, monitor_no); + CodeStub* slow_path = new MonitorExitStub(lock, monitor_no); __ load_stack_address_monitor(monitor_no, lock); __ unlock_object(hdr, object, lock, scratch, slow_path); } diff --git a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp index 47ebe5aa7a772..1a83779ff62f8 100644 --- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp +++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp @@ -168,7 +168,7 @@ nonstatic_field(Array, _length, int) \ nonstatic_field(Array, _data[0], Klass*) \ \ - volatile_nonstatic_field(BasicLock, _metadata, uintptr_t) \ + 
volatile_nonstatic_field(BasicLock, _monitor, ObjectMonitor*) \
                                                              \
   static_field(CodeCache, _low_bound, address) \
   static_field(CodeCache, _high_bound, address) \
diff --git a/src/hotspot/share/runtime/basicLock.cpp b/src/hotspot/share/runtime/basicLock.cpp
index 71082e24bb9b6..4a6e7402dfa52 100644
--- a/src/hotspot/share/runtime/basicLock.cpp
+++ b/src/hotspot/share/runtime/basicLock.cpp
@@ -74,7 +74,7 @@ void BasicLock::move_to(oop obj, BasicLock* dest) {
   }
 #ifdef ASSERT
   else {
-    dest->set_bad_metadata_deopt();
+    dest->set_bad_monitor_deopt();
   }
 #endif
 }
diff --git a/src/hotspot/share/runtime/basicLock.hpp b/src/hotspot/share/runtime/basicLock.hpp
index 8ed38747c7448..8048444266f4c 100644
--- a/src/hotspot/share/runtime/basicLock.hpp
+++ b/src/hotspot/share/runtime/basicLock.hpp
@@ -37,23 +37,21 @@ class BasicLock {
  private:
   // Used as a cache of the ObjectMonitor* used when locking. Must either
   // be nullptr or the ObjectMonitor* used when locking.
-  volatile uintptr_t _metadata;
+  ObjectMonitor* volatile _monitor;

-  uintptr_t get_metadata() const { return Atomic::load(&_metadata); }
-  void set_metadata(uintptr_t value) { Atomic::store(&_metadata, value); }
-  static int metadata_offset_in_bytes() { return (int)offset_of(BasicLock, _metadata); }
+  ObjectMonitor* get_monitor() const { return Atomic::load(&_monitor); }
+  void set_monitor(ObjectMonitor* mon) { Atomic::store(&_monitor, mon); }
+  static int monitor_offset_in_bytes() { return (int)offset_of(BasicLock, _monitor); }

  public:
-  BasicLock() : _metadata(0) {}
+  BasicLock() : _monitor(nullptr) {}

-  void set_bad_metadata_deopt() { set_metadata(badDispHeaderDeopt); }
-
-  static int displaced_header_offset_in_bytes() { return metadata_offset_in_bytes(); }
+  void set_bad_monitor_deopt() { set_monitor(reinterpret_cast<ObjectMonitor*>(badDispHeaderDeopt)); }

   inline ObjectMonitor* object_monitor_cache() const;
   inline void clear_object_monitor_cache();
   inline void set_object_monitor_cache(ObjectMonitor* mon);
-  static int object_monitor_cache_offset_in_bytes() { return metadata_offset_in_bytes(); }
+  static int object_monitor_cache_offset_in_bytes() { return monitor_offset_in_bytes(); }

   void print_on(outputStream* st, oop owner) const;

diff --git a/src/hotspot/share/runtime/basicLock.inline.hpp b/src/hotspot/share/runtime/basicLock.inline.hpp
index 2ea1fe2371c08..9f0f26ee9570d 100644
--- a/src/hotspot/share/runtime/basicLock.inline.hpp
+++ b/src/hotspot/share/runtime/basicLock.inline.hpp
@@ -32,23 +32,23 @@ inline ObjectMonitor* BasicLock::object_monitor_cache() const {
   assert(UseObjectMonitorTable, "must be");
 #if !defined(ZERO) && (defined(X86) || defined(AARCH64) || defined(RISCV64) || defined(PPC64) || defined(S390))
-  return reinterpret_cast<ObjectMonitor*>(get_metadata());
+  return get_monitor();
 #else
   // Other platforms do not make use of the cache yet,
   // and are not as careful with maintaining the invariant
-  // that the metadata either is nullptr or ObjectMonitor*.
+  // that the monitor either is nullptr or a valid ObjectMonitor*.
   return nullptr;
 #endif
 }

 inline void BasicLock::clear_object_monitor_cache() {
   assert(UseObjectMonitorTable, "must be");
-  set_metadata(0);
+  set_monitor(nullptr);
 }

 inline void BasicLock::set_object_monitor_cache(ObjectMonitor* mon) {
   assert(UseObjectMonitorTable, "must be");
-  set_metadata(reinterpret_cast<uintptr_t>(mon));
+  set_monitor(mon);
 }

 #endif // SHARE_RUNTIME_BASICLOCK_INLINE_HPP
diff --git a/src/hotspot/share/runtime/deoptimization.cpp b/src/hotspot/share/runtime/deoptimization.cpp
index 693815c6fc477..119b19676f21d 100644
--- a/src/hotspot/share/runtime/deoptimization.cpp
+++ b/src/hotspot/share/runtime/deoptimization.cpp
@@ -1656,7 +1656,7 @@ bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorIn
         }
 #ifdef ASSERT
         else {
-          mon_info->lock()->set_bad_metadata_deopt();
+          mon_info->lock()->set_bad_monitor_deopt();
         }
 #endif
         JvmtiDeferredUpdates::inc_relock_count_after_wait(deoptee_thread);
diff --git a/src/hotspot/share/runtime/synchronizer.inline.hpp b/src/hotspot/share/runtime/synchronizer.inline.hpp
index a44fe81727695..6a850e5c8ca30 100644
--- a/src/hotspot/share/runtime/synchronizer.inline.hpp
+++ b/src/hotspot/share/runtime/synchronizer.inline.hpp
@@ -45,7 +45,7 @@ inline ObjectMonitor* ObjectSynchronizer::read_monitor(Thread* current, oop obj,

 inline void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
   assert(current == Thread::current(), "must be");
-  LightweightSynchronizer::enter(obj, lock, current);
+  LightweightSynchronizer::enter(obj, lock, current);
 }

 inline bool ObjectSynchronizer::quick_enter(oop obj, BasicLock* lock, JavaThread* current) {
diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp
index bc026887b84f2..cfd5c065574eb 100644
--- a/src/hotspot/share/runtime/vmStructs.cpp
+++ b/src/hotspot/share/runtime/vmStructs.cpp
@@ -674,7 +674,7 @@
   unchecked_nonstatic_field(ObjectMonitor, _object, sizeof(void *)) /* NOTE: no type */ \
   volatile_nonstatic_field(ObjectMonitor, _owner, int64_t) \
   volatile_nonstatic_field(ObjectMonitor, _next_om, ObjectMonitor*) \
-  volatile_nonstatic_field(BasicLock, _metadata, uintptr_t) \
+  volatile_nonstatic_field(BasicLock, _monitor, ObjectMonitor*) \
   nonstatic_field(ObjectMonitor, _contentions, int) \
   volatile_nonstatic_field(ObjectMonitor, _waiters, int) \
   volatile_nonstatic_field(ObjectMonitor, _recursions, intx) \
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java
index 1ea34fe6cd2c4..c41372810a35f 100644
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* * This code is free software; you can redistribute it and/or modify it @@ -151,12 +151,6 @@ public boolean mustBePreserved() { public boolean hasLocker() { return ((value() & lockMaskInPlace) == lockedValue); } - public BasicLock locker() { - if (Assert.ASSERTS_ENABLED) { - Assert.that(hasLocker(), "check"); - } - return new BasicLock(valueAsAddress()); - } public boolean hasMonitor() { return ((value() & monitorValue) != 0); } diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/BasicLock.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/BasicLock.java index 979e08cfb8a24..6ca53b4c54550 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/BasicLock.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/BasicLock.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,23 +36,11 @@ public class BasicLock extends VMObject { static { VM.registerVMInitializedObserver(new Observer() { public void update(Observable o, Object data) { - initialize(VM.getVM().getTypeDataBase()); } }); } - private static synchronized void initialize(TypeDataBase db) throws WrongTypeException { - Type type = db.lookupType("BasicLock"); - displacedHeaderField = type.getCIntegerField("_metadata"); - } - - private static CIntegerField displacedHeaderField; - public BasicLock(Address addr) { super(addr); } - - public Mark displacedHeader() { - return new Mark(addr.addOffsetTo(displacedHeaderField.getOffset())); - } } From 854576384bf423a48b55a5f81a713a5fa770b2cb Mon Sep 17 00:00:00 2001 From: Fredrik Bredberg Date: Wed, 24 Sep 2025 13:55:44 +0200 Subject: [PATCH 2/3] Update after review --- src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.hpp | 2 +- src/hotspot/share/jvmci/vmStructs_jvmci.cpp | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.hpp index 040b70fb0797f..16e7688404998 100644 --- a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.hpp +++ b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.hpp @@ -68,7 +68,7 @@ using MacroAssembler::null_check; // obj : must point to the object to lock, contents preserved // basic_lock: must be x10 & must point to the basic lock, contents destroyed // temp : temporary register, must not be scratch register t0 or t1 - void unlock_object(Register swap, Register obj, Register lock, Register temp, Label& slow_case); + void unlock_object(Register swap, Register obj, Register basic_lock, Register temp, Label& slow_case); void initialize_object( Register obj, // result: pointer to object after successful allocation diff --git a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp index 8ce69c61b7787..2b7df060402a6 100644 --- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp +++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp @@ -168,8 +168,6 @@ nonstatic_field(Array, _length, int) \ nonstatic_field(Array, _data[0], Klass*) \ \ - volatile_nonstatic_field(BasicLock, _monitor, ObjectMonitor*) \ - \ static_field(CodeCache, _low_bound, address) \ static_field(CodeCache, _high_bound, address) \ \ From 43f9c0af25c7df720759c8e56e68fd136375fefb Mon Sep 17 00:00:00 2001 From: Fredrik Bredberg Date: Wed, 24 Sep 2025 
14:59:47 +0200 Subject: [PATCH 3/3] Fixed a mixup --- src/hotspot/share/jvmci/vmStructs_jvmci.cpp | 2 ++ src/hotspot/share/runtime/vmStructs.cpp | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp index 2b7df060402a6..8ce69c61b7787 100644 --- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp +++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp @@ -168,6 +168,8 @@ nonstatic_field(Array, _length, int) \ nonstatic_field(Array, _data[0], Klass*) \ \ + volatile_nonstatic_field(BasicLock, _monitor, ObjectMonitor*) \ + \ static_field(CodeCache, _low_bound, address) \ static_field(CodeCache, _high_bound, address) \ \ diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp index cf662dcc23ccf..ac095be69362d 100644 --- a/src/hotspot/share/runtime/vmStructs.cpp +++ b/src/hotspot/share/runtime/vmStructs.cpp @@ -682,7 +682,6 @@ unchecked_nonstatic_field(ObjectMonitor, _object, sizeof(void *)) /* NOTE: no type */ \ volatile_nonstatic_field(ObjectMonitor, _owner, int64_t) \ volatile_nonstatic_field(ObjectMonitor, _next_om, ObjectMonitor*) \ - volatile_nonstatic_field(BasicLock, _monitor, ObjectMonitor*) \ nonstatic_field(ObjectMonitor, _contentions, int) \ volatile_nonstatic_field(ObjectMonitor, _waiters, int) \ volatile_nonstatic_field(ObjectMonitor, _recursions, intx) \
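
The MonitorExitStub hunks in c1_CodeStubs.hpp and the per-CPU c1_CodeStubs files all make the same simplification: with the legacy stack-locking mode gone, the fast-unlock path may always have clobbered the lock register, so the stub unconditionally recomputes the BasicObjectLock address from the monitor index and always treats _lock_reg as a temp. Below is a minimal stand-alone model of that behaviour; LIROperandModel, AssemblerModel and the slot values are invented stand-ins for C1's real LIR_Opr/LIR_Assembler types, not HotSpot code.

#include <cassert>
#include <vector>

struct LIROperandModel { int value = -1; };

struct AssemblerModel {
  std::vector<int> monitor_slots;  // frame locations of the BasicObjectLocks
  void monitor_address(int monitor_ix, LIROperandModel& dst) {
    dst.value = monitor_slots.at(monitor_ix);
  }
};

struct MonitorExitStubModel {
  LIROperandModel lock_reg;
  int monitor_ix;

  // Post-patch behaviour: lock_reg may have been destroyed by the fast
  // unlocking attempt, so it is always recomputed from the monitor index.
  void emit_code(AssemblerModel& ce) {
    ce.monitor_address(monitor_ix, lock_reg);
    assert(lock_reg.value == ce.monitor_slots.at(monitor_ix));
    // ... hand lock_reg to the c1_monitorexit runtime stub ...
  }
};

int main() {
  AssemblerModel ce;
  ce.monitor_slots = {1024, 1040};
  MonitorExitStubModel stub{LIROperandModel{}, 1};  // lock_reg starts clobbered
  stub.emit_code(ce);
  return 0;
}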
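
The disp_hdr to basic_lock renames in the C1 macro assemblers describe what the register actually points at: the whole BasicObjectLock slot in the frame, whose embedded BasicLock sits at offset 0 (as the ARM code asserts) and whose object field is written at obj_offset(). The sketch below models that layout only; field types, offsetof and the helper names are simplified assumptions, not HotSpot's real oop/BasicObjectLock definitions.

#include <cassert>
#include <cstddef>

struct ObjectMonitor {};
struct oopDesc {};
using oop = oopDesc*;

struct BasicLock       { ObjectMonitor* _monitor = nullptr; };  // monitor cache only
struct BasicObjectLock {
  BasicLock _lock;   // first field, so lock_offset() == 0
  oop       _obj = nullptr;

  static std::size_t lock_offset() { return offsetof(BasicObjectLock, _lock); }
  static std::size_t obj_offset()  { return offsetof(BasicObjectLock, _obj); }
};

int main() {
  BasicObjectLock slot;   // what the renamed C1 parameter "basic_lock" points to
  oopDesc o;
  slot._obj = &o;         // mirrors str/sd/movptr(obj, Address(basic_lock, obj_offset()))
  assert(BasicObjectLock::lock_offset() == 0);
  assert(slot._obj == &o);
  return 0;
}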
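
The basicLock.hpp and basicLock.inline.hpp hunks replace the untyped _metadata word (formerly the displaced header) with a typed ObjectMonitor* cache. The stand-alone sketch below models the resulting interface, assuming std::atomic in place of HotSpot's Atomic::load/Atomic::store and a dummy ObjectMonitor type; it illustrates the post-patch shape and is not the real header.

#include <atomic>
#include <cassert>

struct ObjectMonitor {};  // stand-in for HotSpot's ObjectMonitor

class BasicLockModel {
  // Either nullptr or the ObjectMonitor* used when locking: a typed cache,
  // no longer an untyped "metadata"/displaced-header word.
  std::atomic<ObjectMonitor*> _monitor{nullptr};

 public:
  ObjectMonitor* object_monitor_cache() const       { return _monitor.load(std::memory_order_relaxed); }
  void clear_object_monitor_cache()                 { _monitor.store(nullptr, std::memory_order_relaxed); }
  void set_object_monitor_cache(ObjectMonitor* mon) { _monitor.store(mon, std::memory_order_relaxed); }
};

int main() {
  BasicLockModel lock;
  assert(lock.object_monitor_cache() == nullptr);
  ObjectMonitor mon;
  lock.set_object_monitor_cache(&mon);
  assert(lock.object_monitor_cache() == &mon);
  lock.clear_object_monitor_cache();
  return 0;
}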