@@ -67,9 +67,7 @@ void NativeCall::print() {
 // Inserts a native call instruction at a given pc
 void NativeCall::insert(address code_pos, address entry) {
   intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
-#ifdef AMD64
   guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
-#endif // AMD64
   *code_pos = instruction_code;
   *((int32_t *)(code_pos+1)) = (int32_t) disp;
   ICache::invalidate_range(code_pos, instruction_size);
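The guarantee kept above is the point of this hunk: on x86-64 a native call is the 5-byte "call rel32" form (opcode 0xE8), so the target must be reachable with a signed 32-bit displacement measured from the end of the instruction. A minimal standalone sketch of that encoding, with an illustrative helper name rather than the HotSpot types:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Illustrative sketch only: emit a 5-byte x86 "call rel32" (opcode 0xE8)
    // at code_pos, targeting entry.  The displacement is relative to the end
    // of the instruction, i.e. code_pos + 1 (opcode) + 4 (imm32).
    inline void emit_call_rel32(uint8_t* code_pos, const uint8_t* entry) {
      intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
      assert(disp == (intptr_t)(int32_t)disp && "target must be within +/-2GB");
      code_pos[0] = 0xE8;                            // call rel32 opcode
      int32_t d32 = (int32_t)disp;
      std::memcpy(code_pos + 1, &d32, sizeof d32);   // little-endian imm32
    }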
@@ -157,7 +155,6 @@ void NativeCall::set_destination_mt_safe(address dest) {
 
 
 void NativeMovConstReg::verify() {
-#ifdef AMD64
   // make sure code pattern is actually a mov reg64, imm64 instruction
   bool valid_rex_prefix  = ubyte_at(0) == Assembler::REX_W || ubyte_at(0) == Assembler::REX_WB;
   bool valid_rex2_prefix = ubyte_at(0) == Assembler::REX2 &&
@@ -169,12 +166,6 @@ void NativeMovConstReg::verify() {
     print();
     fatal("not a REX.W[B] mov reg64, imm64");
   }
-#else
-  // make sure code pattern is actually a mov reg, imm32 instruction
-  u_char test_byte = *(u_char*)instruction_address();
-  u_char test_byte_2 = test_byte & ( 0xff ^ register_mask);
-  if (test_byte_2 != instruction_code) fatal("not a mov reg, imm32");
-#endif // AMD64
 }
 
 
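For context on the branch that survives: the 64-bit constant load that NativeMovConstReg covers is the 10-byte movabs form, a REX.W prefix (0x48, or 0x49 when REX.B selects r8-r15, with the REX2 case covering the extended APX registers) followed by opcode 0xB8+reg and a 64-bit immediate. A standalone sketch of a looser pattern check than the verify() above, with a made-up helper name instead of the HotSpot ubyte_at accessor:

    #include <cstdint>

    // Illustrative sketch: rough check that p points at a classic REX.W
    // "mov reg64, imm64" (movabs).  Any REX prefix with W set is 0x48..0x4F;
    // the opcode family B8+rd is 0xB8..0xBF.  This is looser than the
    // verify() above, which accepts only REX_W/REX_WB (and the REX2 form).
    inline bool looks_like_mov_reg64_imm64(const uint8_t* p) {
      bool rex_w_prefix = (p[0] & 0xF8) == 0x48;  // 0x48-0x4F: REX with W=1
      bool mov_imm64    = (p[1] & 0xF8) == 0xB8;  // 0xB8-0xBF: mov r64, imm64
      return rex_w_prefix && mov_imm64;
    }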
@@ -192,12 +183,10 @@ int NativeMovRegMem::instruction_start() const {
   // See comment in Assembler::locate_operand() about VEX prefixes.
   if (instr_0 == instruction_VEX_prefix_2bytes) {
     assert((UseAVX > 0), "shouldn't have VEX prefix");
-    NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
     return 2;
   }
   if (instr_0 == instruction_VEX_prefix_3bytes) {
     assert((UseAVX > 0), "shouldn't have VEX prefix");
-    NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
     return 3;
   }
   if (instr_0 == instruction_EVEX_prefix_4bytes) {
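The dropped NOT_LP64 asserts existed because, in 32-bit mode, the 0xC4/0xC5 bytes are only VEX prefixes when the top two bits of the following byte are set; otherwise they decode as the legacy LDS/LES instructions. In 64-bit mode that ambiguity does not exist, so the prefix length can be read straight off the first byte. A standalone sketch of that dispatch, assuming 64-bit decoding and an illustrative helper rather than the HotSpot constants:

    #include <cstdint>

    // Illustrative sketch: number of SIMD encoding-prefix bytes preceding the
    // opcode, assuming 64-bit mode where 0xC4/0xC5 are unambiguously VEX and
    // 0x62 is the EVEX prefix.
    inline int simd_prefix_length(const uint8_t* p) {
      switch (p[0]) {
        case 0xC5: return 2;   // 2-byte VEX prefix
        case 0xC4: return 3;   // 3-byte VEX prefix
        case 0x62: return 4;   // 4-byte EVEX prefix
        default:   return 0;   // no VEX/EVEX prefix present
      }
    }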
@@ -313,8 +302,7 @@ void NativeMovRegMem::print() {
 void NativeLoadAddress::verify() {
   // make sure code pattern is actually a mov [reg+offset], reg instruction
   u_char test_byte = *(u_char*)instruction_address();
-  if ( ! ((test_byte == lea_instruction_code)
-          LP64_ONLY(|| (test_byte == mov64_instruction_code) ))) {
+  if ((test_byte != lea_instruction_code) && (test_byte != mov64_instruction_code)) {
     fatal("not a lea reg, [reg+offs] instruction");
   }
 }
@@ -340,9 +328,7 @@ void NativeJump::verify() {
 
 void NativeJump::insert(address code_pos, address entry) {
   intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
-#ifdef AMD64
   guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
-#endif // AMD64
 
   *code_pos = instruction_code;
   *((int32_t *)(code_pos + 1)) = (int32_t)disp;
@@ -355,11 +341,7 @@ void NativeJump::check_verified_entry_alignment(address entry, address verified_
   // in use. The patching in that instance must happen only when certain
   // alignment restrictions are true. These guarantees check those
   // conditions.
-#ifdef AMD64
   const int linesize = 64;
-#else
-  const int linesize = 32;
-#endif // AMD64
 
   // Must be wordSize aligned
   guarantee(((uintptr_t) verified_entry & (wordSize - 1)) == 0,
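The reasoning in the comment above is why linesize is now unconditionally 64: the 5-byte jump written by patch_verified_entry must be word aligned and must not straddle a 64-byte cache line, so a thread racing through the prologue never fetches a torn, half-patched instruction. A standalone sketch of that kind of check, with illustrative constants and a made-up helper name:

    #include <cassert>
    #include <cstdint>

    // Illustrative sketch: a 5-byte patch at verified_entry must be word
    // aligned and must not cross a 64-byte cache line.  (Constants mirror the
    // 64-bit values kept by this change; the helper name is hypothetical.)
    inline void check_patch_alignment(const void* verified_entry) {
      const uintptr_t linesize = 64;
      uintptr_t addr = (uintptr_t)verified_entry;
      assert((addr & (sizeof(void*) - 1)) == 0 && "entry must be word aligned");
      assert((addr & (linesize - 1)) + 5 <= linesize &&
             "5-byte jump must not cross a cache line");
    }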
@@ -386,7 +368,6 @@ void NativeJump::check_verified_entry_alignment(address entry, address verified_
 //
 void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
   // complete jump instruction (to be inserted) is in code_buffer;
-#ifdef _LP64
   union {
     jlong cb_long;
     unsigned char code_buffer[8];
@@ -402,43 +383,6 @@ void NativeJump::patch_verified_entry(address entry, address verified_entry, add
 
   Atomic::store((jlong *) verified_entry, u.cb_long);
   ICache::invalidate_range(verified_entry, 8);
-
-#else
-  unsigned char code_buffer[5];
-  code_buffer[0] = instruction_code;
-  intptr_t disp = (intptr_t)dest - ((intptr_t)verified_entry + 1 + 4);
-  *(int32_t *)(code_buffer + 1) = (int32_t)disp;
-
-  check_verified_entry_alignment(entry, verified_entry);
-
-  // Can't call nativeJump_at() because it's asserts jump exists
-  NativeJump* n_jump = (NativeJump*) verified_entry;
-
-  // First patch dummy jmp in place
-
-  unsigned char patch[4];
-  assert(sizeof(patch) == sizeof(int32_t), "sanity check");
-  patch[0] = 0xEB;  // jmp rel8
-  patch[1] = 0xFE;  // jmp to self
-  patch[2] = 0xEB;
-  patch[3] = 0xFE;
-
-  // First patch dummy jmp in place
-  *(int32_t *)verified_entry = *(int32_t *)patch;
-
-  n_jump->wrote(0);
-
-  // Patch 5th byte (from jump instruction)
-  verified_entry[4] = code_buffer[4];
-
-  n_jump->wrote(4);
-
-  // Patch bytes 0-3 (from jump instruction)
-  *(int32_t *)verified_entry = *(int32_t *)code_buffer;
-  // Invalidate. Opteron requires a flush after every write.
-  n_jump->wrote(0);
-#endif // _LP64
-
 }
 
 void NativeIllegalInstruction::insert(address code_pos) {
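With the multi-step 32-bit fallback gone, the only patching strategy left is the one the surviving lines use: read the 8 bytes at the verified entry, overwrite the first five with a jmp rel32 (opcode 0xE9) inside a union, and publish the whole thing with a single aligned 8-byte atomic store, so a thread entering the method observes either the old prologue or the complete jump. A standalone sketch of that idea, using a GCC/Clang atomic builtin in place of the VM's Atomic::store (helper name and signature are illustrative):

    #include <cstdint>
    #include <cstring>

    // Illustrative sketch of the 64-bit patching idea: assemble "jmp rel32"
    // plus the three bytes that follow it into one 8-byte value, then publish
    // it with a single atomic store.  Assumes verified_entry is 8-byte aligned
    // and the jump does not cross a cache line (checked separately).
    inline void patch_entry_with_jump(uint8_t* verified_entry, const uint8_t* dest) {
      union {
        uint64_t as_long;
        uint8_t  bytes[8];
      } u;
      std::memcpy(u.bytes, verified_entry, 8);       // keep the trailing bytes
      intptr_t disp = (intptr_t)dest - ((intptr_t)verified_entry + 1 + 4);
      u.bytes[0] = 0xE9;                             // jmp rel32 opcode
      int32_t d32 = (int32_t)disp;
      std::memcpy(&u.bytes[1], &d32, sizeof d32);    // little-endian imm32
      __atomic_store_n((uint64_t*)verified_entry, u.as_long, __ATOMIC_RELEASE);
    }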
@@ -455,9 +399,7 @@ void NativeGeneralJump::verify() {
 
 void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
   intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
-#ifdef AMD64
   guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
-#endif // AMD64
 
   *code_pos = unconditional_long_jump;
   *((int32_t *)(code_pos+1)) = (int32_t) disp;