@@ -1426,7 +1426,7 @@ void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in
 // reverse bytes in halfword in lower 16 bits and sign-extend
 // Rd[15:0] = Rs[7:0] Rs[15:8] (sign-extend to 64 bits)
 void MacroAssembler::revb_h_h(Register Rd, Register Rs, Register tmp) {
-  if (UseRVB) {
+  if (UseZbb) {
     rev8(Rd, Rs);
     srai(Rd, Rd, 48);
     return;
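Reading aid, not part of the patch: a minimal scalar sketch of what the Zbb path above computes, following the bit layout in the comment. The helper name is hypothetical, and the GCC/Clang builtin `__builtin_bswap64` stands in for `rev8`.

```cpp
#include <cstdint>

// Hypothetical model of revb_h_h: rev8 corresponds to a full 64-bit byte swap,
// and srai by 48 keeps the (now byte-swapped) low halfword, sign-extended.
int64_t revb_h_h_model(uint64_t rs) {
  uint64_t swapped = __builtin_bswap64(rs);  // Rs[7:0] moves to bits 63:56, Rs[15:8] to bits 55:48
  return (int64_t)swapped >> 48;             // arithmetic shift sign-extends Rs[7:0] Rs[15:8]
}
```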
@@ -1443,7 +1443,7 @@ void MacroAssembler::revb_h_h(Register Rd, Register Rs, Register tmp) {
 // reverse bytes in lower word and sign-extend
 // Rd[31:0] = Rs[7:0] Rs[15:8] Rs[23:16] Rs[31:24] (sign-extend to 64 bits)
 void MacroAssembler::revb_w_w(Register Rd, Register Rs, Register tmp1, Register tmp2) {
-  if (UseRVB) {
+  if (UseZbb) {
     rev8(Rd, Rs);
     srai(Rd, Rd, 32);
     return;
@@ -1460,7 +1460,7 @@ void MacroAssembler::revb_w_w(Register Rd, Register Rs, Register tmp1, Register
 // reverse bytes in halfword in lower 16 bits and zero-extend
 // Rd[15:0] = Rs[7:0] Rs[15:8] (zero-extend to 64 bits)
 void MacroAssembler::revb_h_h_u(Register Rd, Register Rs, Register tmp) {
-  if (UseRVB) {
+  if (UseZbb) {
     rev8(Rd, Rs);
     srli(Rd, Rd, 48);
     return;
@@ -1477,11 +1477,11 @@ void MacroAssembler::revb_h_h_u(Register Rd, Register Rs, Register tmp) {
 // reverse bytes in halfwords in lower 32 bits and zero-extend
 // Rd[31:0] = Rs[23:16] Rs[31:24] Rs[7:0] Rs[15:8] (zero-extend to 64 bits)
 void MacroAssembler::revb_h_w_u(Register Rd, Register Rs, Register tmp1, Register tmp2) {
-  if (UseRVB) {
+  if (UseZbb) {
     rev8(Rd, Rs);
     rori(Rd, Rd, 32);
     roriw(Rd, Rd, 16);
-    zext_w(Rd, Rd);
+    zero_extend(Rd, Rd, 32);
     return;
   }
   assert_different_registers(Rs, tmp1, tmp2);
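For orientation (illustrative only, names assumed): the documented result of `revb_h_w_u` is a byte swap inside each halfword of the low word, zero-extended to 64 bits. A hedged scalar model:

```cpp
#include <cstdint>

// Hypothetical model of revb_h_w_u: byte-swap each halfword of Rs[31:0] in
// place and zero-extend the 32-bit result.
uint64_t revb_h_w_u_model(uint64_t rs) {
  uint32_t lo = (uint32_t)rs;
  uint32_t hi_half = __builtin_bswap16((uint16_t)(lo >> 16));  // Rs[23:16] Rs[31:24]
  uint32_t lo_half = __builtin_bswap16((uint16_t)lo);          // Rs[7:0]  Rs[15:8]
  return ((uint64_t)hi_half << 16) | lo_half;
}
```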
@@ -1510,16 +1510,16 @@ void MacroAssembler::revb_h_helper(Register Rd, Register Rs, Register tmp1, Regi
 // reverse bytes in each halfword
 // Rd[63:0] = Rs[55:48] Rs[63:56] Rs[39:32] Rs[47:40] Rs[23:16] Rs[31:24] Rs[7:0] Rs[15:8]
 void MacroAssembler::revb_h(Register Rd, Register Rs, Register tmp1, Register tmp2) {
-  if (UseRVB) {
+  if (UseZbb) {
     assert_different_registers(Rs, tmp1);
     assert_different_registers(Rd, tmp1);
     rev8(Rd, Rs);
-    zext_w(tmp1, Rd);
+    zero_extend(tmp1, Rd, 32);
     roriw(tmp1, tmp1, 16);
     slli(tmp1, tmp1, 32);
     srli(Rd, Rd, 32);
     roriw(Rd, Rd, 16);
-    zext_w(Rd, Rd);
+    zero_extend(Rd, Rd, 32);
     orr(Rd, Rd, tmp1);
     return;
   }
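Likewise for `revb_h`, whose comment describes an in-place byte swap within each of the four halfwords. A sketch of that semantics (not of the emitted instruction sequence):

```cpp
#include <cstdint>

// Hypothetical model of revb_h: swap the two bytes inside every halfword of rs.
uint64_t revb_h_model(uint64_t rs) {
  uint64_t rd = 0;
  for (int i = 0; i < 64; i += 16) {
    uint16_t half = (uint16_t)(rs >> i);
    rd |= (uint64_t)__builtin_bswap16(half) << i;
  }
  return rd;
}
```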
@@ -1534,7 +1534,7 @@ void MacroAssembler::revb_h(Register Rd, Register Rs, Register tmp1, Register tm
 // reverse bytes in each word
 // Rd[63:0] = Rs[39:32] Rs[47:40] Rs[55:48] Rs[63:56] Rs[7:0] Rs[15:8] Rs[23:16] Rs[31:24]
 void MacroAssembler::revb_w(Register Rd, Register Rs, Register tmp1, Register tmp2) {
-  if (UseRVB) {
+  if (UseZbb) {
     rev8(Rd, Rs);
     rori(Rd, Rd, 32);
     return;
@@ -1548,7 +1548,7 @@ void MacroAssembler::revb_w(Register Rd, Register Rs, Register tmp1, Register tm
 // reverse bytes in doubleword
 // Rd[63:0] = Rs[7:0] Rs[15:8] Rs[23:16] Rs[31:24] Rs[39:32] Rs[47,40] Rs[55,48] Rs[63:56]
 void MacroAssembler::revb(Register Rd, Register Rs, Register tmp1, Register tmp2) {
-  if (UseRVB) {
+  if (UseZbb) {
     rev8(Rd, Rs);
     return;
   }
@@ -1570,7 +1570,7 @@ void MacroAssembler::revb(Register Rd, Register Rs, Register tmp1, Register tmp2
 // rotate right with shift bits
 void MacroAssembler::ror_imm(Register dst, Register src, uint32_t shift, Register tmp)
 {
-  if (UseRVB) {
+  if (UseZbb) {
     rori(dst, src, shift);
     return;
   }
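The Zbb path of `ror_imm` is a single `rori`. As an illustrative model of the rotate-right semantics (helper name assumed, not from the patch):

```cpp
#include <cstdint>

// Hypothetical model of ror_imm: rotate src right by `shift` bits.
uint64_t ror_imm_model(uint64_t src, uint32_t shift) {
  shift &= 63;  // keep the rotate amount in range and avoid an undefined 64-bit shift below
  return shift == 0 ? src : (src >> shift) | (src << (64 - shift));
}
```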
@@ -3708,7 +3708,7 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Regi
 // shift 16 bits once.
 void MacroAssembler::ctzc_bit(Register Rd, Register Rs, bool isLL, Register tmp1, Register tmp2)
 {
-  if (UseRVB) {
+  if (UseZbb) {
     assert_different_registers(Rd, Rs, tmp1);
     int step = isLL ? 8 : 16;
     ctz(Rd, Rs);
@@ -4050,7 +4050,7 @@ void MacroAssembler::zero_memory(Register addr, Register len, Register tmp) {
 // shift left by shamt and add
 // Rd = (Rs1 << shamt) + Rs2
 void MacroAssembler::shadd(Register Rd, Register Rs1, Register Rs2, Register tmp, int shamt) {
-  if (UseRVB) {
+  if (UseZba) {
     if (shamt == 1) {
       sh1add(Rd, Rs1, Rs2);
       return;
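The `shadd` comment pins down the semantics directly, and Zba covers the shift-and-add cases with `sh1add`/`sh2add`/`sh3add`. A one-line illustrative model (name assumed):

```cpp
#include <cstdint>

// Hypothetical model of shadd: Rd = (Rs1 << shamt) + Rs2.
uint64_t shadd_model(uint64_t rs1, uint64_t rs2, int shamt) {
  return (rs1 << shamt) + rs2;
}
```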
@@ -4072,14 +4072,14 @@ void MacroAssembler::shadd(Register Rd, Register Rs1, Register Rs2, Register tmp
 }
 
 void MacroAssembler::zero_extend(Register dst, Register src, int bits) {
-  if (UseRVB) {
-    if (bits == 16) {
-      zext_h(dst, src);
-      return;
-    } else if (bits == 32) {
-      zext_w(dst, src);
-      return;
-    }
+  if (UseZba && bits == 32) {
+    zext_w(dst, src);
+    return;
+  }
+
+  if (UseZbb && bits == 16) {
+    zext_h(dst, src);
+    return;
   }
 
   if (bits == 8) {
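This hunk is the heart of the change: `zext.w` is the Zba `add.uw` form while `zext.h` belongs to Zbb, so the two widths are now gated on the matching extension instead of the old blanket `UseRVB` flag. As a reading aid, a hedged scalar model of the result (other widths take the fallback path in the surrounding code):

```cpp
#include <cstdint>

// Hypothetical model of zero_extend: keep only the low `bits` bits of src.
uint64_t zero_extend_model(uint64_t src, int bits) {
  if (bits >= 64) return src;                // nothing to clear
  return src & ((UINT64_C(1) << bits) - 1);  // mask off everything above bit `bits - 1`
}
```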
@@ -4091,7 +4091,7 @@ void MacroAssembler::zero_extend(Register dst, Register src, int bits) {
 }
 
 void MacroAssembler::sign_extend(Register dst, Register src, int bits) {
-  if (UseRVB) {
+  if (UseZbb) {
     if (bits == 8) {
       sext_b(dst, src);
       return;
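Finally, a hedged scalar model of `sign_extend` (the Zbb path shown handles the byte case with `sext.b`); the helper name is illustrative only:

```cpp
#include <cstdint>

// Hypothetical model of sign_extend: treat the low `bits` bits of src as a
// signed value and replicate its sign bit through bit 63.
int64_t sign_extend_model(uint64_t src, int bits) {
  int shift = 64 - bits;
  return (int64_t)(src << shift) >> shift;  // arithmetic right shift does the extension
}
```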