Skip to content

Commit 0d2980c

Browse files
committed
8258192: Obsolete the CriticalJNINatives flag
Reviewed-by: mdoerr, shade
1 parent 5a2452c commit 0d2980c

24 files changed

+176
-1849
lines changed

src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp

Lines changed: 30 additions & 218 deletions
Original file line numberDiff line numberDiff line change
@@ -1112,69 +1112,6 @@ static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMR
11121112
}
11131113
}
11141114

1115-
// Unpack an array argument into a pointer to the body and the length
1116-
// if the array is non-null, otherwise pass 0 for both.
1117-
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) { Unimplemented(); }
1118-
1119-
1120-
class ComputeMoveOrder: public StackObj {
1121-
class MoveOperation: public ResourceObj {
1122-
friend class ComputeMoveOrder;
1123-
private:
1124-
VMRegPair _src;
1125-
VMRegPair _dst;
1126-
int _src_index;
1127-
int _dst_index;
1128-
bool _processed;
1129-
MoveOperation* _next;
1130-
MoveOperation* _prev;
1131-
1132-
static int get_id(VMRegPair r) { Unimplemented(); return 0; }
1133-
1134-
public:
1135-
MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
1136-
_src(src)
1137-
, _dst(dst)
1138-
, _src_index(src_index)
1139-
, _dst_index(dst_index)
1140-
, _processed(false)
1141-
, _next(NULL)
1142-
, _prev(NULL) { Unimplemented(); }
1143-
1144-
VMRegPair src() const { Unimplemented(); return _src; }
1145-
int src_id() const { Unimplemented(); return 0; }
1146-
int src_index() const { Unimplemented(); return 0; }
1147-
VMRegPair dst() const { Unimplemented(); return _src; }
1148-
void set_dst(int i, VMRegPair dst) { Unimplemented(); }
1149-
int dst_index() const { Unimplemented(); return 0; }
1150-
int dst_id() const { Unimplemented(); return 0; }
1151-
MoveOperation* next() const { Unimplemented(); return 0; }
1152-
MoveOperation* prev() const { Unimplemented(); return 0; }
1153-
void set_processed() { Unimplemented(); }
1154-
bool is_processed() const { Unimplemented(); return 0; }
1155-
1156-
// insert
1157-
void break_cycle(VMRegPair temp_register) { Unimplemented(); }
1158-
1159-
void link(GrowableArray<MoveOperation*>& killer) { Unimplemented(); }
1160-
};
1161-
1162-
private:
1163-
GrowableArray<MoveOperation*> edges;
1164-
1165-
public:
1166-
ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
1167-
BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) { Unimplemented(); }
1168-
1169-
// Collected all the move operations
1170-
void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) { Unimplemented(); }
1171-
1172-
// Walk the edges breaking cycles between moves. The result list
1173-
// can be walked in order to produce the proper set of loads
1174-
GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) { Unimplemented(); return 0; }
1175-
};
1176-
1177-
11781115
static void rt_call(MacroAssembler* masm, address dest) {
11791116
CodeBlob *cb = CodeCache::find_blob(dest);
11801117
if (cb) {
@@ -1287,8 +1224,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
12871224
int compile_id,
12881225
BasicType* in_sig_bt,
12891226
VMRegPair* in_regs,
1290-
BasicType ret_type,
1291-
address critical_entry) {
1227+
BasicType ret_type) {
12921228
if (method->is_method_handle_intrinsic()) {
12931229
vmIntrinsics::ID iid = method->intrinsic_id();
12941230
intptr_t start = (intptr_t)__ pc();
@@ -1313,12 +1249,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
13131249
in_ByteSize(-1),
13141250
(OopMapSet*)NULL);
13151251
}
1316-
bool is_critical_native = true;
1317-
address native_func = critical_entry;
1318-
if (native_func == NULL) {
1319-
native_func = method->native_function();
1320-
is_critical_native = false;
1321-
}
1252+
address native_func = method->native_function();
13221253
assert(native_func != NULL, "must have function");
13231254

13241255
// An OopMap for lock (and class if static)
@@ -1332,55 +1263,20 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
13321263
// the hidden arguments as arg[0] and possibly arg[1] (static method)
13331264

13341265
const int total_in_args = method->size_of_parameters();
1335-
int total_c_args = total_in_args;
1336-
if (!is_critical_native) {
1337-
total_c_args += 1;
1338-
if (method->is_static()) {
1339-
total_c_args++;
1340-
}
1341-
} else {
1342-
for (int i = 0; i < total_in_args; i++) {
1343-
if (in_sig_bt[i] == T_ARRAY) {
1344-
total_c_args++;
1345-
}
1346-
}
1347-
}
1266+
int total_c_args = total_in_args + (method->is_static() ? 2 : 1);
13481267

13491268
BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
13501269
VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
13511270
BasicType* in_elem_bt = NULL;
13521271

13531272
int argc = 0;
1354-
if (!is_critical_native) {
1355-
out_sig_bt[argc++] = T_ADDRESS;
1356-
if (method->is_static()) {
1357-
out_sig_bt[argc++] = T_OBJECT;
1358-
}
1273+
out_sig_bt[argc++] = T_ADDRESS;
1274+
if (method->is_static()) {
1275+
out_sig_bt[argc++] = T_OBJECT;
1276+
}
13591277

1360-
for (int i = 0; i < total_in_args ; i++ ) {
1361-
out_sig_bt[argc++] = in_sig_bt[i];
1362-
}
1363-
} else {
1364-
in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
1365-
SignatureStream ss(method->signature());
1366-
for (int i = 0; i < total_in_args ; i++ ) {
1367-
if (in_sig_bt[i] == T_ARRAY) {
1368-
// Arrays are passed as int, elem* pair
1369-
out_sig_bt[argc++] = T_INT;
1370-
out_sig_bt[argc++] = T_ADDRESS;
1371-
ss.skip_array_prefix(1); // skip one '['
1372-
assert(ss.is_primitive(), "primitive type expected");
1373-
in_elem_bt[i] = ss.type();
1374-
} else {
1375-
out_sig_bt[argc++] = in_sig_bt[i];
1376-
in_elem_bt[i] = T_VOID;
1377-
}
1378-
if (in_sig_bt[i] != T_VOID) {
1379-
assert(in_sig_bt[i] == ss.type() ||
1380-
in_sig_bt[i] == T_ARRAY, "must match");
1381-
ss.next();
1382-
}
1383-
}
1278+
for (int i = 0; i < total_in_args ; i++ ) {
1279+
out_sig_bt[argc++] = in_sig_bt[i];
13841280
}
13851281

13861282
// Now figure out where the args must be stored and how much stack space
@@ -1402,34 +1298,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
14021298

14031299
// Now the space for the inbound oop handle area
14041300
int total_save_slots = 8 * VMRegImpl::slots_per_word; // 8 arguments passed in registers
1405-
if (is_critical_native) {
1406-
// Critical natives may have to call out so they need a save area
1407-
// for register arguments.
1408-
int double_slots = 0;
1409-
int single_slots = 0;
1410-
for ( int i = 0; i < total_in_args; i++) {
1411-
if (in_regs[i].first()->is_Register()) {
1412-
const Register reg = in_regs[i].first()->as_Register();
1413-
switch (in_sig_bt[i]) {
1414-
case T_BOOLEAN:
1415-
case T_BYTE:
1416-
case T_SHORT:
1417-
case T_CHAR:
1418-
case T_INT: single_slots++; break;
1419-
case T_ARRAY: // specific to LP64 (7145024)
1420-
case T_LONG: double_slots++; break;
1421-
default: ShouldNotReachHere();
1422-
}
1423-
} else if (in_regs[i].first()->is_FloatRegister()) {
1424-
ShouldNotReachHere();
1425-
}
1426-
}
1427-
total_save_slots = double_slots * 2 + single_slots;
1428-
// align the save area
1429-
if (double_slots != 0) {
1430-
stack_slots = align_up(stack_slots, 2);
1431-
}
1432-
}
14331301

14341302
int oop_handle_offset = stack_slots;
14351303
stack_slots += total_save_slots;
@@ -1596,43 +1464,22 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
15961464

15971465
#endif /* ASSERT */
15981466

1599-
// This may iterate in two different directions depending on the
1600-
// kind of native it is. The reason is that for regular JNI natives
1601-
// the incoming and outgoing registers are offset upwards and for
1602-
// critical natives they are offset down.
1467+
// For JNI natives the incoming and outgoing registers are offset upwards.
16031468
GrowableArray<int> arg_order(2 * total_in_args);
16041469
VMRegPair tmp_vmreg;
16051470
tmp_vmreg.set2(r19->as_VMReg());
16061471

1607-
if (!is_critical_native) {
1608-
for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1609-
arg_order.push(i);
1610-
arg_order.push(c_arg);
1611-
}
1612-
} else {
1613-
// Compute a valid move order, using tmp_vmreg to break any cycles
1614-
ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
1472+
for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1473+
arg_order.push(i);
1474+
arg_order.push(c_arg);
16151475
}
16161476

16171477
int temploc = -1;
16181478
for (int ai = 0; ai < arg_order.length(); ai += 2) {
16191479
int i = arg_order.at(ai);
16201480
int c_arg = arg_order.at(ai + 1);
16211481
__ block_comment(err_msg("move %d -> %d", i, c_arg));
1622-
if (c_arg == -1) {
1623-
assert(is_critical_native, "should only be required for critical natives");
1624-
// This arg needs to be moved to a temporary
1625-
__ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
1626-
in_regs[i] = tmp_vmreg;
1627-
temploc = i;
1628-
continue;
1629-
} else if (i == -1) {
1630-
assert(is_critical_native, "should only be required for critical natives");
1631-
// Read from the temporary location
1632-
assert(temploc != -1, "must be valid");
1633-
i = temploc;
1634-
temploc = -1;
1635-
}
1482+
assert(c_arg != -1 && i != -1, "wrong order");
16361483
#ifdef ASSERT
16371484
if (in_regs[i].first()->is_Register()) {
16381485
assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
@@ -1647,21 +1494,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
16471494
#endif /* ASSERT */
16481495
switch (in_sig_bt[i]) {
16491496
case T_ARRAY:
1650-
if (is_critical_native) {
1651-
unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
1652-
c_arg++;
1653-
#ifdef ASSERT
1654-
if (out_regs[c_arg].first()->is_Register()) {
1655-
reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1656-
} else if (out_regs[c_arg].first()->is_FloatRegister()) {
1657-
freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1658-
}
1659-
#endif
1660-
int_args++;
1661-
break;
1662-
}
16631497
case T_OBJECT:
1664-
assert(!is_critical_native, "no oop arguments");
16651498
object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
16661499
((i == 0) && (!is_static)),
16671500
&receiver_offset);
@@ -1701,7 +1534,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
17011534
int c_arg = total_c_args - total_in_args;
17021535

17031536
// Pre-load a static method's oop into c_rarg1.
1704-
if (method->is_static() && !is_critical_native) {
1537+
if (method->is_static()) {
17051538

17061539
// load oop into a register
17071540
__ movoop(c_rarg1,
@@ -1759,7 +1592,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
17591592
Label lock_done;
17601593

17611594
if (method->is_synchronized()) {
1762-
assert(!is_critical_native, "unhandled");
17631595

17641596
const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
17651597

@@ -1813,14 +1645,12 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
18131645
// Finally just about ready to make the JNI call
18141646

18151647
// get JNIEnv* which is first argument to native
1816-
if (!is_critical_native) {
1817-
__ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
1648+
__ lea(c_rarg0, Address(rthread, in_bytes(JavaThread::jni_environment_offset())));
18181649

1819-
// Now set thread in native
1820-
__ mov(rscratch1, _thread_in_native);
1821-
__ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1822-
__ stlrw(rscratch1, rscratch2);
1823-
}
1650+
// Now set thread in native
1651+
__ mov(rscratch1, _thread_in_native);
1652+
__ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
1653+
__ stlrw(rscratch1, rscratch2);
18241654

18251655
rt_call(masm, native_func);
18261656

@@ -1851,18 +1681,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
18511681
Label safepoint_in_progress, safepoint_in_progress_done;
18521682
Label after_transition;
18531683

1854-
// If this is a critical native, check for a safepoint or suspend request after the call.
1855-
// If a safepoint is needed, transition to native, then to native_trans to handle
1856-
// safepoints like the native methods that are not critical natives.
1857-
if (is_critical_native) {
1858-
Label needs_safepoint;
1859-
__ safepoint_poll(needs_safepoint, false /* at_return */, true /* acquire */, false /* in_nmethod */);
1860-
__ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
1861-
__ cbnzw(rscratch1, needs_safepoint);
1862-
__ b(after_transition);
1863-
__ bind(needs_safepoint);
1864-
}
1865-
18661684
// Switch thread to "native transition" state before reading the synchronization state.
18671685
// This additional state is necessary because reading and testing the synchronization
18681686
// state is not atomic w.r.t. GC, as this scenario demonstrates:
@@ -1971,32 +1789,26 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
19711789
__ str(zr, Address(rthread, JavaThread::pending_jni_exception_check_fn_offset()));
19721790
}
19731791

1974-
if (!is_critical_native) {
1975-
// reset handle block
1976-
__ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
1977-
__ str(zr, Address(r2, JNIHandleBlock::top_offset_in_bytes()));
1978-
}
1792+
// reset handle block
1793+
__ ldr(r2, Address(rthread, JavaThread::active_handles_offset()));
1794+
__ str(zr, Address(r2, JNIHandleBlock::top_offset_in_bytes()));
19791795

19801796
__ leave();
19811797

1982-
if (!is_critical_native) {
1983-
// Any exception pending?
1984-
__ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
1985-
__ cbnz(rscratch1, exception_pending);
1986-
}
1798+
// Any exception pending?
1799+
__ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
1800+
__ cbnz(rscratch1, exception_pending);
19871801

19881802
// We're done
19891803
__ ret(lr);
19901804

19911805
// Unexpected paths are out of line and go here
19921806

1993-
if (!is_critical_native) {
1994-
// forward the exception
1995-
__ bind(exception_pending);
1807+
// forward the exception
1808+
__ bind(exception_pending);
19961809

1997-
// and forward the exception
1998-
__ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1999-
}
1810+
// and forward the exception
1811+
__ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
20001812

20011813
// Slow path locking & unlocking
20021814
if (method->is_synchronized()) {

src/hotspot/cpu/aarch64/vm_version_aarch64.cpp

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -472,6 +472,4 @@ void VM_Version::initialize() {
472472
#endif
473473

474474
_spin_wait = get_spin_wait_desc();
475-
476-
UNSUPPORTED_OPTION(CriticalJNINatives);
477475
}

0 commit comments

Comments (0)