@@ -6299,7 +6299,7 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
62996299 orl (value, rtmp);
63006300 }
63016301
6302- cmpl (count, 2 <<shift); // Short arrays (< 8 bytes) fill by element
6302+ cmpptr (count, 2 <<shift); // Short arrays (< 8 bytes) fill by element
63036303 jcc (Assembler::below, L_fill_4_bytes); // use unsigned cmp
63046304 if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
63056305 Label L_skip_align2;
@@ -6319,13 +6319,13 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
63196319 jccb (Assembler::zero, L_skip_align2);
63206320 movw (Address (to, 0 ), value);
63216321 addptr (to, 2 );
6322- subl (count, 1 <<(shift-1 ));
6322+ subptr (count, 1 <<(shift-1 ));
63236323 BIND (L_skip_align2);
63246324 }
63256325 if (UseSSE < 2 ) {
63266326 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
63276327 // Fill 32-byte chunks
6328- subl (count, 8 << shift);
6328+ subptr (count, 8 << shift);
63296329 jcc (Assembler::less, L_check_fill_8_bytes);
63306330 align (16 );
63316331
@@ -6336,10 +6336,10 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
63366336 }
63376337
63386338 addptr (to, 32 );
6339- subl (count, 8 << shift);
6339+ subptr (count, 8 << shift);
63406340 jcc (Assembler::greaterEqual, L_fill_32_bytes_loop);
63416341 BIND (L_check_fill_8_bytes);
6342- addl (count, 8 << shift);
6342+ addptr (count, 8 << shift);
63436343 jccb (Assembler::zero, L_exit);
63446344 jmpb (L_fill_8_bytes);
63456345
@@ -6351,7 +6351,7 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
63516351 movl (Address (to, 4 ), value);
63526352 addptr (to, 8 );
63536353 BIND (L_fill_8_bytes);
6354- subl (count, 1 << (shift + 1 ));
6354+ subptr (count, 1 << (shift + 1 ));
63556355 jcc (Assembler::greaterEqual, L_fill_8_bytes_loop);
63566356 // fall through to fill 4 bytes
63576357 } else {
@@ -6362,7 +6362,7 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
63626362 jccb (Assembler::zero, L_fill_32_bytes);
63636363 movl (Address (to, 0 ), value);
63646364 addptr (to, 4 );
6365- subl (count, 1 <<shift);
6365+ subptr (count, 1 <<shift);
63666366 }
63676367 BIND (L_fill_32_bytes);
63686368 {
@@ -6376,19 +6376,19 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
63766376 Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2;
63776377
63786378 // If number of bytes to fill < VM_Version::avx3_threshold(), perform fill using AVX2
6379- cmpl (count, VM_Version::avx3_threshold ());
6379+ cmpptr (count, VM_Version::avx3_threshold ());
63806380 jccb (Assembler::below, L_check_fill_64_bytes_avx2);
63816381
63826382 vpbroadcastd (xtmp, xtmp, Assembler::AVX_512bit);
63836383
6384- subl (count, 16 << shift);
6384+ subptr (count, 16 << shift);
63856385 jccb (Assembler::less, L_check_fill_32_bytes);
63866386 align (16 );
63876387
63886388 BIND (L_fill_64_bytes_loop_avx3);
63896389 evmovdqul (Address (to, 0 ), xtmp, Assembler::AVX_512bit);
63906390 addptr (to, 64 );
6391- subl (count, 16 << shift);
6391+ subptr (count, 16 << shift);
63926392 jcc (Assembler::greaterEqual, L_fill_64_bytes_loop_avx3);
63936393 jmpb (L_check_fill_32_bytes);
63946394
@@ -6398,23 +6398,23 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
63986398 Label L_fill_64_bytes_loop;
63996399 vpbroadcastd (xtmp, xtmp, Assembler::AVX_256bit);
64006400
6401- subl (count, 16 << shift);
6401+ subptr (count, 16 << shift);
64026402 jcc (Assembler::less, L_check_fill_32_bytes);
64036403 align (16 );
64046404
64056405 BIND (L_fill_64_bytes_loop);
64066406 vmovdqu (Address (to, 0 ), xtmp);
64076407 vmovdqu (Address (to, 32 ), xtmp);
64086408 addptr (to, 64 );
6409- subl (count, 16 << shift);
6409+ subptr (count, 16 << shift);
64106410 jcc (Assembler::greaterEqual, L_fill_64_bytes_loop);
64116411
64126412 BIND (L_check_fill_32_bytes);
6413- addl (count, 8 << shift);
6413+ addptr (count, 8 << shift);
64146414 jccb (Assembler::less, L_check_fill_8_bytes);
64156415 vmovdqu (Address (to, 0 ), xtmp);
64166416 addptr (to, 32 );
6417- subl (count, 8 << shift);
6417+ subptr (count, 8 << shift);
64186418
64196419 BIND (L_check_fill_8_bytes);
64206420 // clean upper bits of YMM registers
@@ -6424,7 +6424,7 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
64246424 // Fill 32-byte chunks
64256425 pshufd (xtmp, xtmp, 0 );
64266426
6427- subl (count, 8 << shift);
6427+ subptr (count, 8 << shift);
64286428 jcc (Assembler::less, L_check_fill_8_bytes);
64296429 align (16 );
64306430
@@ -6441,12 +6441,12 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
64416441 }
64426442
64436443 addptr (to, 32 );
6444- subl (count, 8 << shift);
6444+ subptr (count, 8 << shift);
64456445 jcc (Assembler::greaterEqual, L_fill_32_bytes_loop);
64466446
64476447 BIND (L_check_fill_8_bytes);
64486448 }
6449- addl (count, 8 << shift);
6449+ addptr (count, 8 << shift);
64506450 jccb (Assembler::zero, L_exit);
64516451 jmpb (L_fill_8_bytes);
64526452
@@ -6457,7 +6457,7 @@ void MacroAssembler::generate_fill(BasicType t, bool aligned,
64576457 movq (Address (to, 0 ), xtmp);
64586458 addptr (to, 8 );
64596459 BIND (L_fill_8_bytes);
6460- subl (count, 1 << (shift + 1 ));
6460+ subptr (count, 1 << (shift + 1 ));
64616461 jcc (Assembler::greaterEqual, L_fill_8_bytes_loop);
64626462 }
64636463 }