@@ -318,8 +318,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
318318
319319 setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
320320
321- if (!Subtarget.hasStdExtZbb() && !Subtarget.hasVendorXTHeadBb() &&
322- !Subtarget.hasVendorXqcibm() && !Subtarget.hasVendorXAndesPerf() &&
321+ if (!Subtarget.hasStdExtZbb() && !Subtarget.hasStdExtP() &&
322+ !Subtarget.hasVendorXTHeadBb() && !Subtarget.hasVendorXqcibm() &&
323+ !Subtarget.hasVendorXAndesPerf() &&
323324 !(Subtarget.hasVendorXCValu() && !Subtarget.is64Bit()))
324325 setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::i8, MVT::i16}, Expand);
325326
@@ -392,7 +393,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
392393 setOperationAction(ISD::BITREVERSE, MVT::i8, Custom);
393394 }
394395
395- if (Subtarget.hasStdExtZbb() ||
396+ if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtP() ||
396397 (Subtarget.hasVendorXCValu() && !Subtarget.is64Bit())) {
397398 setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, XLenVT,
398399 Legal);
@@ -403,6 +404,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
403404 setOperationAction({ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF}, MVT::i32, Custom);
404405 } else {
405406 setOperationAction(ISD::CTTZ, XLenVT, Expand);
408+ // If we have CLZW but not CTZW, use custom promotion for i32.
408+ if (Subtarget.hasStdExtP() && Subtarget.is64Bit())
409+ setOperationAction({ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF}, MVT::i32, Custom);
406410 }
407411
408412 if (!Subtarget.hasCPOPLike()) {
@@ -419,13 +423,15 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
419423 // We need the custom lowering to make sure that the resulting sequence
420424 // for the 32bit case is efficient on 64bit targets.
421425 // Use default promotion for i32 without Zbb.
422- if (Subtarget.is64Bit() && Subtarget.hasStdExtZbb())
426+ if (Subtarget.is64Bit() &&
427+ (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtP()))
423428 setOperationAction({ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF}, MVT::i32, Custom);
424429 } else {
425430 setOperationAction(ISD::CTLZ, XLenVT, Expand);
426431 }
427432
428- if (Subtarget.hasVendorXCValu() && !Subtarget.is64Bit()) {
433+ if (Subtarget.hasStdExtP() ||
434+ (Subtarget.hasVendorXCValu() && !Subtarget.is64Bit())) {
429435 setOperationAction(ISD::ABS, XLenVT, Legal);
430436 } else if (Subtarget.hasShortForwardBranchOpt()) {
431437 // We can use PseudoCCSUB to implement ABS.
@@ -14669,6 +14675,25 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
1466914675 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
1467014676 bool IsCTZ =
1467114677 N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
14678+
14679+ // Without Zbb, lower as 32 - clzw(~X & (X-1))
14680+ if (IsCTZ && !Subtarget.hasStdExtZbb()) {
14681+ assert(Subtarget.hasStdExtP());
14682+
14683+ NewOp0 = DAG.getFreeze(NewOp0);
14684+ SDValue Not = DAG.getNOT(DL, NewOp0, MVT::i64);
14685+ SDValue Minus1 = DAG.getNode(ISD::SUB, DL, MVT::i64, NewOp0,
14686+ DAG.getConstant(1, DL, MVT::i64));
14687+ SDValue And = DAG.getNode(ISD::AND, DL, MVT::i64, Not, Minus1);
14688+ SDValue CLZW = DAG.getNode(RISCVISD::CLZW, DL, MVT::i64, And);
14689+ SDValue Sub = DAG.getNode(ISD::SUB, DL, MVT::i64,
14690+ DAG.getConstant(32, DL, MVT::i64), CLZW);
14691+ SDValue Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Sub,
14692+ DAG.getValueType(MVT::i32));
14693+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
14694+ return;
14695+ }
14696+
1467214697 unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
1467314698 SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
1467414699 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
0 commit comments