diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a68a3c14dc41d..ae9e2fef88673 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -421,8 +421,12 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       (Subtarget.hasVendorXCVbitmanip() && !Subtarget.is64Bit())) {
     // We need the custom lowering to make sure that the resulting sequence
     // for the 32bit case is efficient on 64bit targets.
-    if (Subtarget.is64Bit())
-      setOperationAction({ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF}, MVT::i32, Custom);
+    if (Subtarget.is64Bit()) {
+      setOperationAction(ISD::CTLZ, MVT::i32, Custom);
+      // Use default promotion for XTHeadBb.
+      if (Subtarget.hasStdExtZbb())
+        setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
+    }
   } else {
     setOperationAction(ISD::CTLZ, XLenVT, Expand);
   }
diff --git a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
index 908a12331d1bb..99ed0a355b909 100644
--- a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
@@ -2004,9 +2004,8 @@ define i32 @test_ctlz_i32_zero_undef(i32 %a) nounwind {
 ;
 ; RV64XTHEADBB-LABEL: test_ctlz_i32_zero_undef:
 ; RV64XTHEADBB:       # %bb.0:
-; RV64XTHEADBB-NEXT:    not a0, a0
 ; RV64XTHEADBB-NEXT:    slli a0, a0, 32
-; RV64XTHEADBB-NEXT:    th.ff0 a0, a0
+; RV64XTHEADBB-NEXT:    th.ff1 a0, a0
 ; RV64XTHEADBB-NEXT:    ret
   %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 true)
   ret i32 %tmp
diff --git a/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll b/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
index d504d418c150b..f20ebca314b35 100644
--- a/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64xtheadbb.ll
@@ -240,10 +240,9 @@ define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
 ;
 ; RV64XTHEADBB-NOB-LABEL: findLastSet_i32:
 ; RV64XTHEADBB-NOB:       # %bb.0:
-; RV64XTHEADBB-NOB-NEXT:    not a1, a0
+; RV64XTHEADBB-NOB-NEXT:    slli a1, a0, 32
 ; RV64XTHEADBB-NOB-NEXT:    snez a0, a0
-; RV64XTHEADBB-NOB-NEXT:    slli a1, a1, 32
-; RV64XTHEADBB-NOB-NEXT:    th.ff0 a1, a1
+; RV64XTHEADBB-NOB-NEXT:    th.ff1 a1, a1
 ; RV64XTHEADBB-NOB-NEXT:    xori a1, a1, 31
 ; RV64XTHEADBB-NOB-NEXT:    addi a0, a0, -1
 ; RV64XTHEADBB-NOB-NEXT:    or a0, a0, a1