@@ -1473,7 +1473,7 @@ void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in
 // reverse bytes in halfword in lower 16 bits and sign-extend
 // Rd[15:0] = Rs[7:0] Rs[15:8] (sign-extend to 64 bits)
 void MacroAssembler::revb_h_h(Register Rd, Register Rs, Register tmp) {
-  if (UseRVB) {
+  if (UseZbb) {
     rev8(Rd, Rs);
     srai(Rd, Rd, 48);
     return;
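A side note on the hunk above: a minimal scalar sketch of what the new Zbb path computes, using a hypothetical model_revb_h_h helper that is not part of the patch. rev8 byte-reverses the whole doubleword, which leaves the byte-swapped low halfword in bits [63:48], and the arithmetic shift by 48 drops it back into bits [15:0] with sign extension.

#include <cstdint>
#include <cstdio>

// Hypothetical scalar model of the Zbb path in revb_h_h:
// rev8 reverses all eight bytes, moving the (swapped) low halfword
// into bits [63:48]; srai by 48 sign-extends it back into bits [15:0].
static int64_t model_revb_h_h(uint64_t rs) {
  uint64_t r = 0;
  for (int i = 0; i < 8; i++) {
    r = (r << 8) | ((rs >> (8 * i)) & 0xff);   // rev8
  }
  return (int64_t)(int16_t)(r >> 48);          // srai Rd, Rd, 48
}

int main() {
  // Low halfword 0x1122 becomes 0x2211; 0x11ff becomes 0xffffffffffffff11.
  printf("%llx\n", (unsigned long long)model_revb_h_h(0x1122));
  printf("%llx\n", (unsigned long long)model_revb_h_h(0x11ff));
  return 0;
}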
@@ -1490,7 +1490,7 @@ void MacroAssembler::revb_h_h(Register Rd, Register Rs, Register tmp) {
 // reverse bytes in lower word and sign-extend
 // Rd[31:0] = Rs[7:0] Rs[15:8] Rs[23:16] Rs[31:24] (sign-extend to 64 bits)
 void MacroAssembler::revb_w_w(Register Rd, Register Rs, Register tmp1, Register tmp2) {
-  if (UseRVB) {
+  if (UseZbb) {
     rev8(Rd, Rs);
     srai(Rd, Rd, 32);
     return;
@@ -1507,7 +1507,7 @@ void MacroAssembler::revb_w_w(Register Rd, Register Rs, Register tmp1, Register
 // reverse bytes in halfword in lower 16 bits and zero-extend
 // Rd[15:0] = Rs[7:0] Rs[15:8] (zero-extend to 64 bits)
 void MacroAssembler::revb_h_h_u(Register Rd, Register Rs, Register tmp) {
-  if (UseRVB) {
+  if (UseZbb) {
     rev8(Rd, Rs);
     srli(Rd, Rd, 48);
     return;
@@ -1524,11 +1524,11 @@ void MacroAssembler::revb_h_h_u(Register Rd, Register Rs, Register tmp) {
 // reverse bytes in halfwords in lower 32 bits and zero-extend
 // Rd[31:0] = Rs[23:16] Rs[31:24] Rs[7:0] Rs[15:8] (zero-extend to 64 bits)
 void MacroAssembler::revb_h_w_u(Register Rd, Register Rs, Register tmp1, Register tmp2) {
-  if (UseRVB) {
+  if (UseZbb) {
     rev8(Rd, Rs);
     rori(Rd, Rd, 32);
     roriw(Rd, Rd, 16);
-    zext_w(Rd, Rd);
+    zero_extend(Rd, Rd, 32);
     return;
   }
   assert_different_registers(Rs, tmp1, tmp2);
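For revb_h_w_u, a minimal scalar sketch (hypothetical helpers, not patch code) of how rev8, rori by 32, roriw by 16 and the final zero_extend combine to leave Rs[23:16] Rs[31:24] Rs[7:0] Rs[15:8] in the low word:

#include <cstdint>
#include <cstdio>

static uint64_t rev8_model(uint64_t x) {          // byte-reverse the doubleword
  uint64_t r = 0;
  for (int i = 0; i < 8; i++) r = (r << 8) | ((x >> (8 * i)) & 0xff);
  return r;
}

static uint64_t rori_model(uint64_t x, int n) {   // 64-bit rotate right, n in 1..63
  return (x >> n) | (x << (64 - n));
}

// Hypothetical model of the Zbb path in revb_h_w_u: after rev8 the byte-swapped
// low word sits in the upper half; rori 32 swaps the halves, roriw 16 swaps the
// two halfwords of the low word, and zero_extend clears bits [63:32].
static uint64_t model_revb_h_w_u(uint64_t rs) {
  uint64_t rd = rev8_model(rs);
  rd = rori_model(rd, 32);
  uint32_t w = (uint32_t)rd;                      // roriw operates on the low 32 bits
  w = (w >> 16) | (w << 16);
  return (uint64_t)w;                             // zero_extend(Rd, Rd, 32)
}

int main() {
  // Rs[31:0] = 0x11223344 -> Rd = 0x22114433 (Rs[23:16] Rs[31:24] Rs[7:0] Rs[15:8]).
  printf("%llx\n", (unsigned long long)model_revb_h_w_u(0x11223344));
  return 0;
}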
@@ -1557,16 +1557,16 @@ void MacroAssembler::revb_h_helper(Register Rd, Register Rs, Register tmp1, Regi
 // reverse bytes in each halfword
 // Rd[63:0] = Rs[55:48] Rs[63:56] Rs[39:32] Rs[47:40] Rs[23:16] Rs[31:24] Rs[7:0] Rs[15:8]
 void MacroAssembler::revb_h(Register Rd, Register Rs, Register tmp1, Register tmp2) {
-  if (UseRVB) {
+  if (UseZbb) {
     assert_different_registers(Rs, tmp1);
     assert_different_registers(Rd, tmp1);
     rev8(Rd, Rs);
-    zext_w(tmp1, Rd);
+    zero_extend(tmp1, Rd, 32);
     roriw(tmp1, tmp1, 16);
     slli(tmp1, tmp1, 32);
     srli(Rd, Rd, 32);
     roriw(Rd, Rd, 16);
-    zext_w(Rd, Rd);
+    zero_extend(Rd, Rd, 32);
     orr(Rd, Rd, tmp1);
     return;
   }
@@ -1581,7 +1581,7 @@ void MacroAssembler::revb_h(Register Rd, Register Rs, Register tmp1, Register tm
 // reverse bytes in each word
 // Rd[63:0] = Rs[39:32] Rs[47:40] Rs[55:48] Rs[63:56] Rs[7:0] Rs[15:8] Rs[23:16] Rs[31:24]
 void MacroAssembler::revb_w(Register Rd, Register Rs, Register tmp1, Register tmp2) {
-  if (UseRVB) {
+  if (UseZbb) {
     rev8(Rd, Rs);
     rori(Rd, Rd, 32);
     return;
@@ -1595,7 +1595,7 @@ void MacroAssembler::revb_w(Register Rd, Register Rs, Register tmp1, Register tm
 // reverse bytes in doubleword
 // Rd[63:0] = Rs[7:0] Rs[15:8] Rs[23:16] Rs[31:24] Rs[39:32] Rs[47,40] Rs[55,48] Rs[63:56]
 void MacroAssembler::revb(Register Rd, Register Rs, Register tmp1, Register tmp2) {
-  if (UseRVB) {
+  if (UseZbb) {
     rev8(Rd, Rs);
     return;
   }
@@ -1617,7 +1617,7 @@ void MacroAssembler::revb(Register Rd, Register Rs, Register tmp1, Register tmp2
 // rotate right with shift bits
 void MacroAssembler::ror_imm(Register dst, Register src, uint32_t shift, Register tmp)
 {
-  if (UseRVB) {
+  if (UseZbb) {
     rori(dst, src, shift);
     return;
   }
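The non-Zbb fallback of ror_imm lies outside this hunk; as a rough model of the operation itself, a rotate right by shift bits can be composed from two shifts and an or (assuming shift is in 1..63):

#include <cstdint>
#include <cstdio>

// Rough model of the operation ror_imm implements. With Zbb this is a single
// rori; without it the same result can be built from two shifts and an or.
static uint64_t ror64(uint64_t src, uint32_t shift) {
  return (src >> shift) | (src << (64 - shift));
}

int main() {
  printf("%llx\n", (unsigned long long)ror64(0x00000000000000ffULL, 8)); // ff00000000000000
  return 0;
}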
@@ -3563,7 +3563,7 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Regi
 // shift 16 bits once.
 void MacroAssembler::ctzc_bit(Register Rd, Register Rs, bool isLL, Register tmp1, Register tmp2)
 {
-  if (UseRVB) {
+  if (UseZbb) {
     assert_different_registers(Rd, Rs, tmp1);
     int step = isLL ? 8 : 16;
     ctz(Rd, Rs);
@@ -3905,7 +3905,7 @@ void MacroAssembler::zero_memory(Register addr, Register len, Register tmp) {
 // shift left by shamt and add
 // Rd = (Rs1 << shamt) + Rs2
 void MacroAssembler::shadd(Register Rd, Register Rs1, Register Rs2, Register tmp, int shamt) {
-  if (UseRVB) {
+  if (UseZba) {
     if (shamt == 1) {
       sh1add(Rd, Rs1, Rs2);
       return;
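For shadd the guard becomes UseZba because sh1add, sh2add and sh3add are Zba instructions covering shamt values 1 to 3 (only the shamt == 1 case is visible in this hunk). A scalar sketch of the operation described by the comment above, with an illustrative helper name not taken from the patch:

#include <cstdint>
#include <cstdio>

// Scalar model of MacroAssembler::shadd: Rd = (Rs1 << shamt) + Rs2.
// With Zba, shamt == 1/2/3 each collapse to one sh1add/sh2add/sh3add;
// otherwise the macro has to shift into a temporary and add.
static uint64_t shadd_model(uint64_t rs1, uint64_t rs2, int shamt) {
  return (rs1 << shamt) + rs2;
}

int main() {
  // Typical use: scaling an index by an element size before adding a base.
  uint64_t base = 0x1000, index = 5;
  printf("%llx\n", (unsigned long long)shadd_model(index, base, 3)); // 0x1028 = index * 8 + base
  return 0;
}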
@@ -3927,14 +3927,14 @@ void MacroAssembler::shadd(Register Rd, Register Rs1, Register Rs2, Register tmp
 }
 
 void MacroAssembler::zero_extend(Register dst, Register src, int bits) {
-  if (UseRVB) {
-    if (bits == 16) {
-      zext_h(dst, src);
-      return;
-    } else if (bits == 32) {
-      zext_w(dst, src);
-      return;
-    }
+  if (UseZba && bits == 32) {
+    zext_w(dst, src);
+    return;
+  }
+
+  if (UseZbb && bits == 16) {
+    zext_h(dst, src);
+    return;
   }
 
   if (bits == 8) {
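This hunk is the crux of the flag split: zext.w is a Zba pseudo-instruction (add.uw with a zero register operand), while zext.h belongs to Zbb, so the two widths now test different flags instead of the old catch-all UseRVB. A rough C++ model of the resulting behavior follows; the shift-based fallback for other widths is my assumption about the part of the function not shown here:

#include <cstdint>
#include <cstdio>

// Rough model of MacroAssembler::zero_extend after this patch: one-instruction
// paths when the matching extension is present, otherwise clear the upper
// (64 - bits) bits with a shift pair. The flag variables are placeholders.
static bool UseZba = true, UseZbb = true;

static uint64_t zero_extend_model(uint64_t src, int bits) {
  if (UseZba && bits == 32) return (uint32_t)src;   // zext.w (add.uw)
  if (UseZbb && bits == 16) return (uint16_t)src;   // zext.h
  return (src << (64 - bits)) >> (64 - bits);       // slli/srli fallback (assumed)
}

int main() {
  printf("%llx\n", (unsigned long long)zero_extend_model(0xffffffffff345678ULL, 32)); // ff345678
  printf("%llx\n", (unsigned long long)zero_extend_model(0xffffffffff345678ULL, 8));  // 78
  return 0;
}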
@@ -3946,7 +3946,7 @@ void MacroAssembler::zero_extend(Register dst, Register src, int bits) {
 }
 
 void MacroAssembler::sign_extend(Register dst, Register src, int bits) {
-  if (UseRVB) {
+  if (UseZbb) {
     if (bits == 8) {
       sext_b(dst, src);
       return;